541 Commits

Author SHA1 Message Date
Sam Wenham
43dfe946b6 Just removing dud whitespace 2020-02-24 15:04:07 +00:00
Sam Wenham
656ddcfe93 Merge branch 'django-1.10' of ssh://expo.survex.com/~/troggle into django-1.10 2020-02-22 15:45:20 +00:00
Sam Wenham
505bc48331 Show coordinates for entrance
Use filter to find coordinates
2020-02-22 15:38:22 +00:00
Sam Wenham
92b273e45f Whitespace cleanup 2020-02-21 14:26:14 +00:00
Sam Wenham
978270b152 Remove dud settings
allow any site for dev
2020-02-21 14:25:13 +00:00
Sam Wenham
291e3baabf Get media working (at least in development) 2020-02-21 14:19:37 +00:00
Sam Wenham
eb5406f325 Add django migrations. These are needed on newer django installs to maintain the database 2020-02-20 11:55:35 +00:00
Sam Wenham
de22b071b0 Improve README
Make new style QMs from survexfiles work
2019-07-19 01:04:18 +01:00
Sam Wenham
08a41941f9 Part one of getting troggle to work with django 1.10
Major rework of how survex is processed
2019-07-16 00:07:37 +01:00
Sam Wenham
a26109cb30 Allow comments against names in logbooks in brackets
Convert accent chars in names into simple chars as this is what people enter in the logbook
2019-07-11 12:29:38 +01:00
Sam Wenham
6b5b9a5315 Merge branch 'master' of ssh://expo.survex.com/~/troggle 2019-07-10 12:37:38 +01:00
Sam Wenham
4ebf3d8a0e Bring back TinyMCE for editing flatpages 2019-07-10 12:32:04 +01:00
37d02b298d added ssh git clone command variant 2019-07-09 15:55:27 +01:00
Sam Wenham
d6053322e8 Merge branch 'master' of ssh://expo.survex.com/~/troggle 2019-07-09 10:41:21 +01:00
Expo on server
5b5f385b67 Remove .hgignore Change mode on modelvis.py 2019-07-09 05:23:37 +01:00
Sam Wenham
04428c45c9 Fix description of localsettingsdocker 2019-07-09 05:23:30 +01:00
a7f605ced9 changes because we do not use svn anymore
Signed-off-by: psargent <philip.sargent@gmail.com>
2019-07-09 05:23:22 +01:00
Expo on server
0adb8e528d Add .gitignore file 2019-07-09 05:23:14 +01:00
Expo on server
f4280f9907 Add info to debian instructions on creating troggle logfile (in /var/log) 2019-07-09 05:22:49 +01:00
Expo on server
9266e5460e Add .gitignore file 2019-06-27 00:23:22 +01:00
Expo on server
ad45859071 Add info to debian instructions on creating troggle logfile (in /var/log) 2019-06-27 00:14:39 +01:00
expo on server
ee759980c4 remove hack in logbook parsing to convert ol to olly and wook to wookey.
It broke 'Olaf' as a name, for example.
2019-06-26 21:46:57 +01:00
expo on server
18b371bc15 remove hack in logbook parsing to convert ol to olly and wook to wookey.
It broke 'Olaf' as a name, for example.
2019-06-26 21:46:57 +01:00
expo on server
9e77b8bb75 Add server setup instructions/recipe for Debian Stretch 2019-06-26 21:45:17 +01:00
expo on server
e6acd4bdbd Add server setup instructions/recipe for Debian Stretch 2019-06-26 21:45:17 +01:00
Sam Wenham
424219fb6f Just commit the logbook parser this time (can we move to git now!!!) 2019-06-26 21:21:37 +01:00
Sam Wenham
2ebb37552f Just commit the logbook parser this time (can we move to git now!!!) 2019-06-26 21:21:37 +01:00
Sam Wenham
822359fe51 Backed out changeset: 4552f42bdf54 2019-06-26 20:57:24 +01:00
Sam Wenham
97426a0ddb Backed out changeset: 4552f42bdf54 2019-06-26 20:57:24 +01:00
Sam Wenham
3f78382d45 Remove this stupid hard coded name match 2019-06-26 20:56:08 +01:00
Sam Wenham
8a1be45aac Remove this stupid hard coded name match 2019-06-26 20:56:08 +01:00
Sam Wenham
b5cca8be3b Merge 2019-06-26 18:43:42 +01:00
Sam Wenham
4d2f9a2b39 Merge 2019-06-26 18:43:42 +01:00
Sam Wenham
8fe02e5c89 Allow html chars in names 2019-06-26 18:36:08 +01:00
Sam Wenham
b2dd905f0e Allow html chars in names 2019-06-26 18:36:08 +01:00
expo on server
c06d372984 Add expo.survex.com to ALLOWED_HOSTS in troggle settings 2019-06-26 15:23:20 +01:00
expo on server
7a9aef6faf Add expo.survex.com to ALLOWED_HOSTS in troggle settings 2019-06-26 15:23:20 +01:00
expo on server
6889ae9fa3 Add SURVEX_TOPNAME (top-level survex file) as a setting item in settings.py so it's not hardcoded. 2019-06-26 03:32:18 +01:00
expo on server
02d3cc84d5 Add SURVEX_TOPNAME (top-level survex file) as a setting item in settings.py so it's not hardcoded. 2019-06-26 03:32:18 +01:00
Sam Wenham
768ec83037 Updating caves and entrances is no longer nuclear!
Big overhaul of people processing, fullname added to the model
lastname is now names -1 unless you only have one (yes you Wookey)
this allows for Jon Arne Toft and Wookey to live in the same DB
names can now have html chars in them, this should be real unicode but that can
only happen when we go to Python 3!
2019-04-19 22:52:54 +01:00
Sam Wenham
b42249890e Updating caves and entrances is no longer nuclear!
Big overhaul of people processing, fullname added to the model
lastname is now names -1 unless you only have one (yes you Wookey)
this allows for Jon Arne Toft and Wookey to live in the same DB
names can now have html chars in them, this should be real unicode but that can
only happen when we go to Python 3!
2019-04-19 22:52:54 +01:00
Sam Wenham
2f9870644b missed objects 2019-04-18 19:27:23 +01:00
Sam Wenham
cc313246bb missed objects 2019-04-18 19:27:23 +01:00
Sam Wenham
4e187581b3 Clear data issues for logbooks before reloading 2019-04-18 19:26:09 +01:00
Sam Wenham
bfe018cde6 Clear data issues for logbooks before reloading 2019-04-18 19:26:09 +01:00
Sam Wenham
dc479b33c5 Add ordering to the data issues model 2019-04-18 19:01:29 +01:00
Sam Wenham
ae284a1f30 Add ordering to the data issues model 2019-04-18 19:01:29 +01:00
Sam Wenham
f1736c53c4 Fix CSRF issues in svx form
Set date formats
Add DataIssue model and add errors to it to allow us to give people a list of
stuff to fix
2019-04-14 22:45:31 +01:00
Sam Wenham
23df89cf31 Fix CSRF issues in svx form
Set date formats
Add DataIssue model and add errors to it to allow us to give people a list of
stuff to fix
2019-04-14 22:45:31 +01:00
Sam Wenham
05c5e26e99 Sort people by notability
Better errors and tidy
Nicer date formats
2019-04-02 02:04:38 +01:00
Sam Wenham
d1d0c24ed8 Sort people by notability
Better errors and tidy
Nicer date formats
2019-04-02 02:04:38 +01:00
Wookey
c4301cf6df Merge lots of troggle fixes 2019-04-02 00:57:54 +01:00
Wookey
b3089fafe9 Merge lots of troggle fixes 2019-04-02 00:57:54 +01:00
Wookey
de7d68b1eb folk.csv has moved into 'folk' dir out of 'noinfo' 2019-04-02 00:57:13 +01:00
Wookey
e913a56a6b folk.csv has moved into 'folk' dir out of 'noinfo' 2019-04-02 00:57:13 +01:00
expoonserver
bb8dbb381f Move cave and entrance data out of 'noinfo' 2019-04-01 23:03:45 +01:00
expoonserver
39c61bd526 Move cave and entrance data out of 'noinfo' 2019-04-01 23:03:45 +01:00
Sam Wenham
144610d6c2 Better error messages 2019-03-31 16:44:58 +01:00
Sam Wenham
10f1cdb458 Better error messages 2019-03-31 16:44:58 +01:00
Sam Wenham
40f413ba47 Oops, shouldn't have committed the DateTime change, yet... 2019-03-31 16:43:21 +01:00
Sam Wenham
a588221524 Oops, shouldn't have committed the DateTime change, yet... 2019-03-31 16:43:21 +01:00
Sam Wenham
9cd8734947 Support html and wiki logbook entries
Move nearest_station to nearest_station_name and make nearest_station a foreign
key to SurvexStation
Lots of tidying
2019-03-31 15:39:53 +01:00
Sam Wenham
9df91b221b Support html and wiki logbook entries
Move nearest_station to nearest_station_name and make nearest_station a foreign
key to SurvexStation
Lots of tidying
2019-03-31 15:39:53 +01:00
Sam Wenham
c8551991b2 Remove the redundant render_with_context() as django now does this just with the
render() shortcut
Move from mimetype to content_type, missed in last commit
2019-03-30 17:02:07 +00:00
Sam Wenham
64a4842dcb Remove the redundant render_with_context() as django now does this just with the
render() shortcut
Move from mimetype to content_type, missed in last commit
2019-03-30 17:02:07 +00:00
Sam Wenham
f666b9c396 Update new management command for DB reset
Switch to content_type from mimetype
Make DB reset not nuke so much
Tidy logbook parser
2019-03-30 13:58:38 +00:00
Sam Wenham
a4532a29da Update new management command for DB reset
Switch to content_type from mimetype
Make DB reset not nuke so much
Tidy logbook parser
2019-03-30 13:58:38 +00:00
Wookey
5469794159 Only show unofficial number if it's not already displayed 2019-03-27 01:59:09 +00:00
Wookey
705dd51f30 Only show unofficial number if it's not already displayed 2019-03-27 01:59:09 +00:00
expoonserver
1e26578305 Add reload_db option to databaseReset.py 2019-03-26 23:59:13 +00:00
expoonserver
ddb62f2897 Add reload_db option to databaseReset.py 2019-03-26 23:59:13 +00:00
expoonserver
8b5f81c8f8 Display temporary numbers on main cave index, when they exist. 2019-03-26 23:58:27 +00:00
expoonserver
f8be510509 Display temporary numbers on main cave index, when they exist. 2019-03-26 23:58:27 +00:00
Sam Wenham
27af84da65 Remove the news section as it never gets updated
Fix logbook entry so the edit link works
Tidy the control panel page
2019-03-10 11:05:57 +00:00
Sam Wenham
121f0a6aac Remove the news section as it never gets updated
Fix logbook entry so the edit link works
Tidy the control panel page
2019-03-10 11:05:57 +00:00
Sam Wenham
9646c32819 Remove jquery.min.js from troggle as it busts the footer menu. Yep troggle has a footer menu!! 2019-03-09 19:32:00 +00:00
Sam Wenham
8932bdc466 Remove jquery.min.js from troggle as it busts the footer menu. Yep troggle has a footer menu!! 2019-03-09 19:32:00 +00:00
Sam Wenham
c3ab5c6096 Fix person chronology to get the date from the logbook entry 2019-03-09 18:43:58 +00:00
Sam Wenham
9fa93fdd15 Fix person chronology to get the date from the logbook entry 2019-03-09 18:43:58 +00:00
Sam Wenham
7a7433bc84 Fix people list
Cope with Jimmy McFoo as a name!
Don't set the top expo value in the code when it is piss easy to calculate
Fix typo from last commit
2019-03-09 18:21:10 +00:00
Sam Wenham
b4296f1736 Fix people list
Cope with Jimmy McFoo as a name!
Don't set the top expo value in the code when it is piss easy to calculate
Fix typo from last commit
2019-03-09 18:21:10 +00:00
Sam Wenham
ff8c5ef0c1 There is no point having two functions do basically the same thing so make the
load all logbooks call load logbook(expo)
Remove the return message from load logbook as it isn't used
2019-03-09 11:18:44 +00:00
Sam Wenham
1bac650aee There is no point having two functions do basically the same thing so make the
load all logbooks call load logbook(expo)
Remove the return message from load logbook as it isn't used
2019-03-09 11:18:44 +00:00
Sam Wenham
a22b42e832 Make the logbook parser a little more sane
Move the parser to expo mapping to settings
Set a default parser
Iterate over the expo years rather than the mapping list!
2019-03-06 23:20:34 +00:00
Sam Wenham
9fc80bed35 Make the logbook parser a little more sane
Move the parser to expo mapping to settings
Set a default parser
Iterate over the expo years rather than the mapping list!
2019-03-06 23:20:34 +00:00
Sam Wenham
afa5a8b940 Merge 2019-03-04 20:04:23 +00:00
Sam Wenham
59f8647e0f Merge 2019-03-04 20:04:23 +00:00
Sam Wenham
f593104c04 Backed out changeset: e80a936faab6 2019-03-04 19:39:57 +00:00
Sam Wenham
384b0438b4 Backed out changeset: e80a936faab6 2019-03-04 19:39:57 +00:00
Sam Wenham
dc6d89b0ca Backed out changeset: f23440eb11a3 2019-03-04 19:39:43 +00:00
Sam Wenham
e01507d541 Backed out changeset: f23440eb11a3 2019-03-04 19:39:43 +00:00
Rad
b505a26ce4 rebuild descriptions database, some visuals 2019-02-28 12:36:49 +00:00
Rad
a5e1529514 working on rebuilding everything 2019-02-27 22:29:45 +00:00
Sam Wenham
6f42bd51e1 Revert (I hate hg!!!) 2019-02-26 20:43:18 +00:00
Sam Wenham
42d10cf43d Revert (I hate hg!!!) 2019-02-26 20:43:18 +00:00
Sam Wenham
4e27c90f77 merge 2019-02-26 20:41:47 +00:00
Sam Wenham
2226aa34d5 merge 2019-02-26 20:41:47 +00:00
Sam
0268ff46b3 Add docker readme, settings and update compose file
Fix views_logbooks.py
2019-02-26 19:19:01 +00:00
Rad
1d7cf3f41a Messing with millenialcaves.html or similar 2019-02-26 14:07:45 +00:00
Rad
32c186afd7 Messing with millenialcaves.html or similar 2019-02-26 14:05:41 +00:00
Rad
54a9f7a37c Messing with millenialcaves.html or similar 2019-02-26 12:50:19 +00:00
Rad
e4e8cc5993 Messing with millenialcaves.html or similar 2019-02-26 12:47:50 +00:00
Rad
8703ed5d94 Messing with millenialcaves.html or similar 2019-02-26 12:30:20 +00:00
Rad
a4118261e1 Messing with millenialcaves.html or similar 2019-02-26 12:29:46 +00:00
Rad
6392c1f238 Messing with millenialcaves.html or similar 2019-02-26 12:23:12 +00:00
Rad
4148ece133 Messing with millenialcaves.html or similar 2019-02-26 12:07:45 +00:00
Rad
c724f292ca Messing with millenialcaves.html or similar 2019-02-26 12:03:17 +00:00
Rad
53513b812b Messing with millenialcaves.html or similar 2019-02-26 12:01:55 +00:00
Rad
beffdbd89d Messing with millenialcaves.html or similar 2019-02-26 12:01:30 +00:00
Rad
8bd0df1bab Messing with millenialcaves.html or similar 2019-02-26 10:57:02 +00:00
Rad
4ae43e94f4 Messing with millenialcaves.html or similar 2019-02-26 10:02:57 +00:00
Rad
da88771fd4 Messing with millenialcaves.html or similar 2019-02-26 09:45:17 +00:00
Rad
b6b7d2aa12 Messing with millenialcaves.html or similar 2019-02-26 09:41:02 +00:00
Rad
c733b0f2eb Messing with millenialcaves.html or similar 2019-02-26 02:03:26 +00:00
Rad
9712bf6dfd Messing with millenialcaves.html or similar 2019-02-26 02:01:09 +00:00
Rad
5e4c1493a1 Messing with millenialcaves.html or similar 2019-02-26 01:56:39 +00:00
Rad
41b1334257 Messing with millenialcaves.html or similar 2019-02-26 01:48:52 +00:00
Rad
a2fcbae129 Messing with millenialcaves.html or similar 2019-02-26 01:46:54 +00:00
Rad
e9077542c9 Messing with millenialcaves.html or similar 2019-02-26 01:46:05 +00:00
Rad
79595521a9 Messing with millenialcaves.html or similar 2019-02-26 01:45:03 +00:00
Rad
38b658fd3f Messing with millenialcaves.html or similar 2019-02-26 01:43:54 +00:00
Rad
a89123755c Messing with millenialcaves.html or similar 2019-02-26 01:43:28 +00:00
Rad
0fb9accd05 Messing with millenialcaves.html or similar 2019-02-26 01:41:15 +00:00
Rad
f87df707ab Messing with millenialcaves.html or similar 2019-02-26 01:37:52 +00:00
Rad
a2cb771fc1 Messing with millenialcaves.html or similar 2019-02-26 01:35:55 +00:00
Rad
c888f59ff0 Messing with millenialcaves.html or similar 2019-02-26 01:34:09 +00:00
Rad
43ff6e09be Messing with millenialcaves.html or similar 2019-02-26 01:30:32 +00:00
Rad
810ab3ea4f Messing with millenialcaves.html or similar 2019-02-26 01:18:47 +00:00
Rad
cb5978237b Messing with millenialcaves.html or similar 2019-02-26 01:13:54 +00:00
Rad
622d523c98 Messing with millenialcaves.html or similar 2019-02-26 01:12:14 +00:00
Rad
ee7d2529e7 Messing with millenialcaves.html or similar 2019-02-26 01:08:04 +00:00
Rad
82de967f97 Messing with millenialcaves.html or similar 2019-02-26 01:07:18 +00:00
Rad
466e667e14 Messing with millenialcaves.html or similar 2019-02-26 01:04:09 +00:00
Rad
3c563ce665 Messing with millenialcaves.html or similar 2019-02-26 01:03:22 +00:00
Rad
19a061efa8 Messing with millenialcaves.html or similar 2019-02-26 00:56:46 +00:00
Rad
a397eb9d00 Messing with millenialcaves.html or similar 2019-02-26 00:48:34 +00:00
Rad
e5d864359a Messing with millenialcaves.html or similar 2019-02-26 00:47:35 +00:00
Rad
b2adc285b6 Messing with millenialcaves.html or similar 2019-02-26 00:45:56 +00:00
Rad
8af604262d Messing with millenialcaves.html or similar 2019-02-26 00:43:46 +00:00
Rad
b33ca2b290 Messing with millenialcaves.html or similar 2019-02-26 00:43:05 +00:00
Rad
c4455168c6 Messing with millenialcaves.html or similar 2019-02-26 00:35:28 +00:00
Rad
1b4674acde Messing with millenialcaves.html or similar 2019-02-26 00:33:37 +00:00
Rad
4fac4317a3 Messing with millenialcaves.html or similar 2019-02-26 00:33:04 +00:00
Rad
78bf9986b7 Messing with millenialcaves.html or similar 2019-02-26 00:30:09 +00:00
Rad
5154c0d8e5 Messing with millenialcaves.html or similar 2019-02-26 00:29:16 +00:00
Rad
b01fcc3a6d Messing with millenialcaves.html or similar 2019-02-26 00:23:23 +00:00
Rad
e8585bec42 Messing with millenialcaves.html or similar 2019-02-26 00:22:58 +00:00
Rad
521f0241f8 Messing with millenialcaves.html or similar 2019-02-26 00:21:54 +00:00
Rad
0394becdac Messing with millenialcaves.html or similar 2019-02-26 00:21:27 +00:00
Rad
e5fa636776 Messing with millenialcaves.html or similar 2019-02-26 00:17:56 +00:00
Rad
6beaf4afdd Messing with millenialcaves.html or similar 2019-02-26 00:17:11 +00:00
Rad
822812525e Messing with millenialcaves.html or similar 2019-02-26 00:08:15 +00:00
Rad
a4a92483bd Messing with millenialcaves.html or similar 2019-02-26 00:04:27 +00:00
Rad
3254ba1443 2019-02-26 00:00:34 +00:00
Rad
4c3d0ce7fa 2019-02-25 23:55:06 +00:00
Rad
a99afe07c6 2019-02-25 23:53:19 +00:00
Rad
73bb60eff9 2019-02-25 23:52:47 +00:00
Rad
0a214c5d4b 2019-02-25 23:51:26 +00:00
Rad
29c53f35ab 2019-02-25 23:48:58 +00:00
Rad
3746dab5de 2019-02-25 23:46:52 +00:00
Rad
18dbadd675 space/tab 2019-02-25 23:42:56 +00:00
Rad
ee2cd0d391 trying to add new field 2019-02-25 23:40:53 +00:00
Rad
0cc4e7c7d3 2019-02-25 23:37:12 +00:00
Sam Wenham
478065786f Merge 2019-02-25 23:34:10 +00:00
Sam Wenham
e64d82cd92 Start of moving databasereset to django management 2019-02-25 23:10:24 +00:00
Sam Wenham
12a991920a Get get_absolute_url in the correct place 2019-02-25 23:07:20 +00:00
Rad
0758efb3ec 2019-02-25 22:34:13 +00:00
Rad
54b782c67e tab/space fix 2019-02-25 22:28:30 +00:00
Rad
78a5f656b9 added Rad's playground 2019-02-25 22:24:33 +00:00
Rad
6e23853759 merge 2019-02-25 21:02:30 +00:00
Rad
becfaa1504 change to table 2019-02-25 20:58:32 +00:00
Sam Wenham
77a6015ad6 Fix the All Survex page to work with 1623 area 2019-02-25 20:13:28 +00:00
Sam Wenham
7c15a7439d Decode the url encoded # when looking at wallets 2019-02-24 19:50:45 +00:00
Sam Wenham
b4f4db5754 Deal better with the wallet letter number combo of 2019#X01 2019-02-24 18:55:30 +00:00
Sam Wenham
c6656e6642 Stop django moaning about unit tests from pre 1.6, like we have any anyway! 2019-02-24 16:48:12 +00:00
Sam Wenham
e6fa54d0e5 Fix survey scans
Remove the assert for folders in survey wallets, this does mean currently they
will be ignored by troggle.
2019-02-24 16:46:02 +00:00
Sam Wenham
f16b4e3f47 Make the surveys importer not explode 2019-02-24 14:29:14 +00:00
Sam Wenham
4ad5b68433 Make things more compatible with newer python
Fix the expeditions list
Improvements to make it compatible with django 1.8
Bump the years to add 2018
Update the .hgignore file to ignore junk
2019-02-24 13:03:34 +00:00
Sam Wenham
552730f0a3 Revert urls.py as it contains Django 1.8 upgrade code 2019-02-23 15:43:38 +00:00
Sam Wenham
a1f02e575f Prevent troggle adding the menu if there is one in the file
Add a Docker compose file to bring up a dev troggle easily
Various PEP improvements
2019-02-23 15:30:58 +00:00
Sam Wenham
f58b1db920 Don't create years that aren't here yet or troggle goes boom 2018-06-20 18:14:13 +01:00
Sam Wenham
3d2ac06a72 Move the years on a bit 2018-06-20 18:11:12 +01:00
expoonserver
9802f45452 Add missing linefeed on survey-parsing error message 2018-06-18 23:43:20 +01:00
expoonserver
1ad58d6b5d Make sure that cave parser only reads .html files in cave_data dir (to stop foo~ causing 'duplicate cave' error) 2018-06-18 23:17:05 +01:00
expoonserver
6805bcb690 Add 'troggle' namespace to databasereset.py so it runs in django >1.5 2018-06-17 02:41:58 +01:00
expoonserver
c162411f0b FileUploadForm does not work with django 1.7.
It tries to use database during class initialisation.
removed it for now - not sure if it's important...
2018-06-17 02:24:00 +01:00
expoonserver
10a05d686e django.setup needs to be run before any attempt to use database 2018-06-17 02:23:02 +01:00
expoonserver
89ef5c19ff imports must specify the application name in later django versions.
databasereset updated accordingly.
2018-06-16 19:00:26 +01:00
Sam Wenham
4385ce86c1 Add the extra setting for the threed cache to all the template configs 2018-04-20 20:58:05 +01:00
Sam Wenham
46124a770f Fix the django for the spinny js cave viewer.
Make the paths settings (don't hard code things like this!!)
Add " round spinny urls from the late merge (the rest were done for the move off 1.4.2)
2018-04-20 20:55:12 +01:00
Sam Wenham
6f6327d267 Merge with django-upgrade 2018-04-17 22:19:20 +01:00
expoonserver
6710a469ee Add CaveView spinny caves view to each troggle cave page 2018-04-17 21:57:02 +01:00
Sam Wenham
174c475ec7 Add default BooleanField(default=False) for django 1.7 compatibility 2018-04-17 21:51:39 +01:00
Sam Wenham
d3b42a125d 1.7 requirements 2018-04-15 16:45:07 +01:00
Sam Wenham
2f2f4d396d New vars needed to make django 1.7 and tinymce work 2018-04-15 16:36:23 +01:00
Sam Wenham
e1eea7088f Django 1.7 wsgi.py 2018-04-15 16:29:30 +01:00
Sam Wenham
760fa3114f missed from last commit 2018-04-15 16:28:52 +01:00
Sam Wenham
798ae591c6 Django 1.7 mostly working. Big refactor so probably bugs 2018-04-15 16:28:13 +01:00
Sam Wenham
7877efba0a Up to 1.6.11 on stretch. New manage.py. Some tidying 2018-04-15 12:00:59 +01:00
Sam Wenham
cfa888fde6 More cleanup and modernisation 2018-04-14 21:37:12 +01:00
Sam Wenham
cedcb0988a Clean up indenting in models
add registration required modules
2018-04-14 21:14:19 +01:00
Sam Wenham
c939013b14 Add ref as a valid survex command to prevent errors 2018-04-14 16:13:21 +01:00
Sam Wenham
458d0e1ebc add all the docker commands to build and run troggle in a container (more of a guide than something to run) 2018-04-11 22:32:47 +01:00
Sam Wenham
776152ef47 Add missing expose container port and commented command to auto start the dev server 2018-04-11 22:18:15 +01:00
Sam Wenham
9f285a9f34 Update requirements for 1.5.12 and preserve the 1.4.22 requirements 2018-04-11 22:13:31 +01:00
Sam Wenham
302ad0632e Add the docker files and the pip requirements.txt to allow install using pip 2018-04-11 22:03:48 +01:00
Sam Wenham
ffb5d7bdda Upgrade to django 1.5, some functions have been changed
url in templates now requires quotes round the first arg
USE_TZ added
2018-04-11 22:02:57 +01:00
Sam Wenham
242cf4741a Import Image from PIL to support newer python
import the Django registration module rather than the troggle one
2018-04-10 01:34:06 +01:00
Wookey
41a14f161d Avoid barf if URL field in new cave form is left blank. 2018-02-28 15:57:27 +00:00
Wookey
f0e1406c5f Update old website base URL in template from cucc.survex.com/expo to expo.survex.com 2018-02-28 15:55:00 +00:00
expoonserver
d7c6676c49 Test whether url is not 'None' before applying 'startswith' test in
forms.py when entering new caves, otherwise it barfs.
2017-10-25 03:49:03 +01:00
expoonserver
5e9dfc6ea6 Fix Scan scanning, so that 2015#X01 format (with 'X') is accepted in
scan directories. Allows 2016 data to be processed.
2017-03-07 15:44:42 +00:00
Sam Wenham
27fca090fc Bring troggle a little more up to date 2016-09-04 13:47:26 +01:00
expo
716131f005 Fix cave pages to have entrances and description on one page.
Fixes broken links on description and entrance pages.
Removes need for jquery-ui.
2016-07-02 23:42:47 +01:00
expo
496280f3e6 merge server changes
HG server changes
2016-06-09 04:16:46 +01:00
Sam Wenham
0dd0951b28 Merge 2016-05-20 21:35:58 +01:00
Wookey
b9597fbb57 Merge 'expofiles' instead of 'expoimages' config changes 2016-01-27 04:27:38 +00:00
Wookey
edc6591554 Correct typo on cave and entrance template files
('If you edit this files...')
2016-01-27 04:24:44 +00:00
expoonserver
560b9bf985 Move expoimage to expofiles
Relies on permanent redirect in apache config to keep old URLs working
everywhere.
2015-10-02 15:10:04 +01:00
expoonserver
6652e3f160 remove code saying we can't do interlaced pngs. It's fine now. 2015-10-02 15:07:03 +01:00
expo
b0f1f73ce4 Store expo user/password info in localsettings file, and not repeated in databaseReset script 2015-09-16 01:58:51 +01:00
expo
214d887c57 Commit changes made on expo 2015 2015-09-16 01:52:45 +01:00
Sam Wenham
6b16724c2a tidy up after merge 2015-08-22 13:28:17 +01:00
Sam Wenham
f1bb927063 Merge settings changes 2015-08-22 13:26:38 +01:00
expo
eeda1bed73 properly quote JSLIB_PATH and make clear that example password is just an example 2015-07-26 00:38:10 +01:00
Sam Wenham
751ec9517f Change JSLIB_PATH to JSLIB_URL and correct the path 2015-07-01 18:22:25 +01:00
Wookey
228814be33 Fix unquoted string in troggle localsettingspotatohut.py 2015-07-01 03:55:12 +01:00
Sam Wenham
cebcbeb73a sysadmin to expouser for email 2015-07-01 01:26:04 +01:00
Sam Wenham
057b09dca9 Move expo user settings out of databasereset.py to localsettings where they really belong 2015-07-01 01:18:25 +01:00
Sam Wenham
480541ae54 Add a little style 2015-06-28 13:52:33 +01:00
Sam Wenham
60303d041c Remove unnecessary escape slashes 2015-06-28 13:46:28 +01:00
Sam Wenham
5a911ecec7 I think this is breaking prospecting 2015-06-28 13:39:50 +01:00
Sam Wenham
7056f9a8b2 Remove balkonhoehle from the QM parser as this will need a lot of effort to get working 2015-06-28 12:28:18 +01:00
Sam Wenham
34036581f2 Correct JSLIB_URL 2015-06-27 13:01:15 +01:00
expo
dcc67fddda Don't put passwords in the repo 2015-06-24 04:41:50 +01:00
expo
03cad0a37f Survex parser fix to avoid allocation on error (by martin). 2015-06-24 04:09:19 +01:00
expo
a4651eaa0a Added warnings that the database will need updating if cave or entrance data files are modified 2015-06-21 15:11:51 +01:00
expo
7aed3d3b30 Moved notable caves to settings.py, link to a script to fix permissions 2015-06-21 15:08:09 +01:00
expo
4771f52b20 Have different links for system js files and troggle js files 2015-06-21 15:06:44 +01:00
Wookey
77ad85b05c merge balconhoehle changes from server 2015-06-19 01:55:51 +01:00
Wookey
01d877d26e Use django-registration, not a local copy.
This old one uses deprecated hashcompat.
2015-06-10 23:52:49 +01:00
DWalker
e84d990366 Add in balkon hoehle QM list 2015-05-25 21:55:54 +01:00
Wookey
e06be10f7f Change password of 'expo' user created by databasereset script to match that used elsewhere 2015-05-25 21:26:26 +01:00
Wookey
fe6750e824 Fix up obvious URLs containing subarea names (smkridge) 2015-04-08 03:40:57 +01:00
Wookey
d29fe2ee1c Merge in Sam's parser debugging 2015-04-08 03:27:48 +01:00
Wookey
1156b1d3ea rename troggle paper.odt to troggle_paper.odt as spaces in the repo are a
pain
2015-04-08 03:24:00 +01:00
Wookey
126a10cf94 Rename troggle paper to not have a space in it. 2015-04-06 02:38:24 +01:00
Sam Wenham
4560e0da84 Revert all of this the date is needed and is a not null in the db 2015-01-26 21:53:32 +00:00
Sam Wenham
f9c2e0e170 One more try 2015-01-26 21:15:17 +00:00
Sam Wenham
cf413dd03c Ooops that wasn't right 2015-01-26 21:13:47 +00:00
Sam Wenham
4965678443 Don't assert an error on bad date formats 2015-01-26 21:12:27 +00:00
Sam Wenham
67f94f9436 A little more verbosity 2015-01-19 22:48:50 +00:00
Sam Wenham
1186662960 Add a little verbosity 2015-01-19 22:41:48 +00:00
Sam Wenham
3010961383 Try and ignore files that don't end in .html (We really need to change to .xml) e.g. .html.orig!!
Change the index on troggle to move on with the year
2015-01-19 21:28:35 +00:00
Wookey
806fd41130 remove two files accidentally included in last commit 2014-09-11 07:41:33 +01:00
Wookey
af07161f05 remove internal copies of jquery, jquiery-forms, jquery-ui+themes,
django-feincms and codemirror
2014-09-11 07:40:58 +01:00
Wookey
5ff759db93 Fix templates to use system javascript for jquery, jquery-ui and
jquery-ui themes
2014-09-11 07:38:45 +01:00
Wookey
7f292d402b Use REPOS_ROOT_PATH so there is just one place to change paths 2014-09-11 06:33:34 +01:00
Wookey
c180780da9 Update the README file a bit - still needs work. 2014-09-10 23:46:05 +01:00
Wookey
d75862bc41 Merge change of 'cavesnew'->'caves' in databasereset. 2014-07-28 01:22:52 +01:00
Wookey
7cdb603d75 Add 107 to notable caves (noting that this is hard-coded into
core/views_caves.py which is just shoddy)
2014-07-28 01:21:24 +01:00
expo
94c44b0d7b Change databasereset to use 'caves' instead of 'cavesnew' for reloading the cave database 2014-07-28 00:18:10 +01:00
expo
4a3d181097 Set potato hut localsettingsfile to have correct URLs 2014-07-23 09:47:48 +01:00
Sam
d8863dca48 Fix media url to allow for working in the hut 2014-07-23 09:10:31 +01:00
expo
e0c439e850 Add a new config file for the potato hut setup. 2014-07-23 09:11:17 +01:00
Wookey
f4f1b3ca6d Allow comma in starcommands (*,fix) (comma is default valid *set blank) 2014-07-01 02:26:26 +01:00
Wookey
4a93790c7e Fix survex parser to allow whitespace between * and command (as survex
does).
2014-07-01 02:12:34 +01:00
Wookey
5265acd9dc merge in survex parsing changes from server. 2014-06-26 02:37:55 +01:00
expoonserver
9f69bb5fca Remove spurious real password from example localsettingserver.py file.
Add comment on how to use it.
2014-06-26 02:35:37 +01:00
expoonserver
b1d6e1c3d5 Replace assert on unrecognised commands with print, so that a minor
parsing issue doesn't completely kill a parsing update.
Add parsing for requires and alias commands.
2014-06-26 02:34:19 +01:00
Wookey
659703b221 Merge with server version 2014-06-09 19:30:06 +01:00
expoonserver
3869bd536e remove humongous troggle_log.txt from repo 2014-05-19 03:12:16 +01:00
expoonserver
408d154d3f Refer to debian package, not upstream URL 2014-05-19 03:11:46 +01:00
Wookey
44e3eb8a18 Tidy up urls file a little 2014-05-14 20:46:59 +01:00
wookey
51a3cecc02 Document 'cavesnew' option in databasereset.py - which just reads in
caves datafiles.
2013-10-07 23:45:59 +01:00
olly
6b4ea7b83e merge 2013-08-08 15:48:10 +02:00
expo
da71cca22f Prospecting guide and images and few minor other things. 2013-08-01 17:00:01 +02:00
wookey
5c945e3431 Put correct user for mysql on seagrass back into config (It was accidentally overwritten in recent changes) 2013-07-06 09:28:39 +01:00
Wookey
ba5bc365c1 merge support for django 1.2 location for auth module 2013-07-02 21:12:59 +01:00
Wookey
c362b1b529 3rd attempt at getting the right syntax for the CSRF protection in 2013-07-02 21:11:07 +01:00
Wookey
f90b6dc7ab update location of auth module for django 1.4 2013-07-02 21:10:30 +01:00
wookey
a6a9016548 Add support for old and new (1.4 on) location for auth module. 2013-07-02 21:05:48 +01:00
Wookey
5351108ec1 merged in proper CSRF changes from server 2013-07-02 20:23:55 +01:00
Wookey
7759e481d4 Change database syntax to modern format as old style no longer
supported in django 1.4
2013-07-02 18:13:27 +01:00
Wookey
69c3a06c98 Remove support for django 1.0 CSRF as we only care about 1.2 or later 2013-07-02 18:12:18 +01:00
Wookey
d1ad8730d7 Add CSRF protection to registration form (and remove annoying second
password)
2013-07-02 18:10:45 +01:00
wookey
f3a570a21d Add csrf token to registration forms 2013-07-02 17:26:35 +01:00
Wookey
f626d3304d parsing_log should not be saved in the vcs 2013-07-02 00:49:07 +01:00
Wookey
7eb4c89bf0 Don't explode if a master survex file is not found for a directory -
that shouldn't cause total failure to read the database in.
2013-07-02 00:47:42 +01:00
wookey
9435be0f19 Add 'people' option to DatabaseReset.py, to read in just the folk list after update.
Not sure that it actually works mind...
2013-07-02 00:34:58 +01:00
wookey
7f108f6d9a Set title to show 1976-2013
Put quick link to 2011 back as that one works
2013-07-02 00:33:53 +01:00
wookey
3f98470af8 Add a function for running people parser
And comments on how logbooks can't be read in until 'year' exists in database
2013-06-25 15:59:19 +01:00
wookey
e58b69782c Add note on how to create a new year in troggle. 2013-06-25 15:56:19 +01:00
wookey
e49e22b37c Removed asserts which meant that if any 'odd' .svx files, or directories
with no obvious 'controlling' svx file, were added to the dataset then the
survex viewer code exploded and the website didn't work.

It's wrong that adding a new cave with an oddly-named .svx file can break
the website in this way, so these asserts are wrong.
2013-06-24 23:32:12 +01:00
wookey
82e69b4f05 Add parsing_log.txt to the files ignored by the VCS. 2013-06-24 23:29:14 +01:00
wookey
ea9266ecf9 Add help command and usage info to databaseReset.py 2013-06-24 01:31:14 +01:00
wookey
99ea6778ad Add comment to identifycavedir function
and remove now-disused special-case filename
2013-06-24 01:30:17 +01:00
wookey
ccd80e74f8 Change template headers to show 2012/2013 as shortcuts 2013-06-23 03:19:41 +01:00
Wookey
3057d2a232 Add checking for compass too
Only print filenames on error by default
2013-05-22 02:33:47 +01:00
Wookey
d1ac659d4f Add error check in place where parser died 2013-05-22 02:10:58 +01:00
wookey
bb1989d0f0 Add some exception checking to parsers/caves.py so that missing entrance
slugs don't blow up the import. Also reduce the noise, so
you just get a warning about missing slugs printed out
2012-09-24 23:23:38 +01:00
wookey
418e5e1d3f Add debug for which entrance file was being read so we get a clue where to look when 'databasereset newcaves' falls over 2012-09-24 22:38:35 +01:00
Wookey
3b12e6d975 Add some debug to cave parser as it's easy to make it fail
e.g. by referring to slugs that don't exist.
2012-09-24 22:29:18 +01:00
expoonserver
54d7f1d097 Remove jgtfile URLs (presumably no longer needed) 2012-09-08 01:12:17 +01:00
Martin
cfc90deb83 Merge 2012-08-14 23:49:26 +02:00
Martin
1a0e577606 Bug fixing of cave and entrance forms removal of slugs 2012-08-14 22:51:15 +02:00
Martin
a05fe94d90 ignore files ending in ~ 2012-08-14 15:31:34 +02:00
Martin
8e64062214 added entrance locations 2012-08-14 15:08:08 +02:00
Martin
8c1882eec8 fixed spelling 2012-08-14 15:06:18 +02:00
Martin
8dd51096cf allow extensions to be capitalised 2012-08-14 15:05:15 +02:00
expo
ecd5bbcb1d Started removing foreignkeys to caves, to achieve greater flexibility. Some log book entries stuff may be broken. Add ability to make new caves and entrances via website. 2012-08-12 19:10:23 +02:00
Martin Green
6d5babd331 Prospecting template 2012-08-10 19:34:44 +02:00
Martin Green
79b7d32664 Made a prospecting guide and fixed survex station description. Removed parsing of underground descriptions to wikis. 2012-08-10 19:02:13 +02:00
expo
dd66ad835a Fixed directory names for the survey scans such that surveys could be found. It did not seem possible to simply change the localsettings.py file to get it to work. 2012-08-08 11:29:15 +02:00
expo
a29fd964bd Prevent modification of auto generated files 2012-08-06 12:56:20 +02:00
expo
1ef274ec1d Editing no longer changes files more than necessary. Removed TinyMCE editing. /Sumbit/Submit 2012-08-06 12:19:48 +02:00
expo
0f5627505f Fix broken markup 2012-08-05 21:37:46 +02:00
expo
c0782e1cca Fixed cave order 2012-08-05 19:28:34 +02:00
expo
ed1d273e03 Fixed cave order 2012-08-05 19:26:24 +02:00
expo
9654e5da1c FIx base template so admin link, expoweb link work and use consistent base URL 2012-08-05 02:33:48 +02:00
expo
8040b746b4 Note that the instructions for adding a survey are all wrong. 2012-08-05 00:35:02 +02:00
expo
05004aa874 Fix up parser paths so everything is found 2012-08-05 00:26:05 +02:00
Martin Green
4a21720745 Merge 2012-06-10 17:24:10 +01:00
Martin Green
13cb2e9b0f no need to export to cavetab2 anymore 2012-06-10 17:22:50 +01:00
ExpoOnServer
0259947cda merge 2012-06-10 17:21:26 +01:00
ExpoOnServer
080684e56f no need to export cavetab2 anymore 2012-06-10 17:20:57 +01:00
Martin Green
4b269bb234 update caves from new cave file format not cavetab2.csv 2012-06-10 17:16:33 +01:00
Martin Green
1a62931202 Merge 2012-06-10 16:56:44 +01:00
Martin Green
c2029df3c9 New parser for new cave format 2012-06-10 16:56:12 +01:00
ExpoOnServer
4a074295ad Looks like photos have been added by editing urls.py. 2012-06-10 16:19:17 +01:00
Martin Green
711fefb0da Start to change dataformat for caves, along with their editing. Start to change survex reader to cope better with equates/tags. 2012-06-10 14:59:21 +01:00
Martin Green
fd12e70f78 Editing for entrances along with caves
More detailed display of entrances
2012-05-23 09:23:40 +01:00
Martin Green
fac89bae30 Render a cave editing page. Nb it does not do save anything yet. 2012-01-07 19:05:25 +00:00
Wookey
ab97e367cb merge from upstream 2011-09-15 12:13:07 +01:00
Wookey
ae693ca4c5 Add 2010 and 2011 logbooks to parsing list (can we make this auto
somehow - by agreeing a logbook format, or letting it guess)?
2011-09-15 12:12:18 +01:00
expo
77dea07b40 branch merge 2011-09-02 03:39:20 +02:00
expo
77dcf7f759 Remove old ref to goatchurch in localconfig 2011-09-01 01:50:51 +02:00
Martin Green
59e7c4d5df Bug fix 2011-08-08 13:11:57 +01:00
Martin Green
0b5e57b85e ignorecase when finding html tags 2011-08-08 12:58:02 +01:00
Martin Green
c623acf832 template changes. Fix link to css. 2011-08-08 12:40:47 +01:00
Martin Green
36b1888f46 Added 'page not found do you want to make this page' page. Minor tweaks 2011-08-08 12:18:47 +01:00
Martin Green
c09a668620 Fix logbook editing 2011-08-08 12:17:38 +01:00
Martin Green
e85c386375 Make a common base for expoweb pages. Ignore any header information in expoweb except titles. 2011-08-08 10:58:50 +01:00
Martin Green
c66ecc4d7f Allow pages to be rendered when the body tag has attributes. Put an edit link on the homepage. 2011-08-08 10:04:59 +01:00
Martin Green
13fe89af9f Allow for editing flatpage titles, and made a common uneditable list of links. 2011-08-08 09:51:47 +01:00
Martin Green
d8fe39ae86 Allow the viewing of noinfo caves on non public website without login 2011-08-08 08:51:12 +01:00
Martin Green
5f5359f933 Changed regex for finding head and body of flat pages. 2011-08-07 19:17:27 +01:00
Martin Green
e820a516de bug fix for edit link for index files 2011-08-07 17:30:18 +01:00
expo
e9fdea80c0 Changed ubuntu local settings to be applicable to the expo machine 2011-08-07 16:12:52 +02:00
expo
9534bd8881 Make caveindex link to urls in the original hierarchy such that their
hyperlinks and images work.
2011-08-07 16:11:35 +02:00
ExpoOnServer
5be508620e update localsettings for server and expo machine 2011-07-14 03:50:49 +01:00
Wookey
82e968d5c7 Attempt 17b to end with the right files as tip 2011-07-12 02:44:07 +01:00
Wookey
b4b060a962 Add odt and ods mime types to our list.
Maybe this should just be read in from the real list...
2011-07-12 00:57:48 +01:00
ExpoOnServer
64e5e9d45c merging correct urls.py for /troggle dir in 2011-07-12 00:49:24 +01:00
ExpoOnServer
881215e815 Add empty troggle_log.txt file to save doing it by hand 2011-07-12 00:02:01 +01:00
ExpoOnServer
35cd983cc9 I seem to be going wrong in circles here 2011-07-11 23:45:12 +01:00
Wookey
0a70039dee really, really get all version the same! 2011-07-11 23:43:32 +01:00
ExpoOnServer
18ccc57f87 add /troggle dir (Martin's changes to get main site back as entry point) 2011-07-11 23:35:11 +01:00
Wookey
c23fcc5b06 rest of martin's changes, without reverting lineend issues 2011-07-11 23:28:23 +01:00
Wookey
21ff3b8b5d Add changes from martin 2011-07-11 23:19:48 +01:00
Martin Green
97c388dba0 Moved troggle main page to /troggle, added a link in flat pages.
Now / takes you to the expoweb index page
2011-07-11 22:38:40 +01:00
Martin Green
10799e2ce3 Do not make an entrance redirect for entrances without their own pages 2011-07-11 22:37:49 +01:00
Martin Green
7ef6b1fcc2 implemented mimetypes, index.htm(l) and fixed edit view 2011-07-11 22:36:48 +01:00
Martin Green
7a220b4c87 Change absolute url for caves to their expoweb url, such that links work 2011-07-11 22:35:32 +01:00
Wookey
dc1327674c remove all the DOS linefeeds 2011-07-11 02:10:22 +01:00
Wookey
c8ff8e3ef6 Add /index.htm to EXPOWEB root URL in main template so that you get
the static stuff
2011-07-11 01:55:12 +01:00
Wookey
f766df597c undosify lineends 2011-07-11 01:49:03 +01:00
Wookey
bab92cb88c merge martin's tip again 2011-07-11 00:52:58 +01:00
Martin Green
5d8a5494cd Split up tags such that they use ajax 2011-07-11 00:50:07 +01:00
Wookey
129d93dfa7 Merge from Martin's tip 2011-07-11 00:49:18 +01:00
Martin Green
65c55f0f21 Removed conversion to wiki, replaced Surveystation models with text, added area 1623 to all relevant caves. 2011-07-11 00:15:59 +01:00
Martin Green
8578a3097a Added flat pages for entrance and special flatpage redirects.
Entrances should probably store their urls like cavers.  Maybe the flatpages should be handled by the app Aaron installed.
2011-07-11 00:13:06 +01:00
Martin Green
de5f68e42c Removed links to removed forms 2011-07-11 00:04:30 +01:00
Martin Green
f44b0be459 slug views, start of cave edit form, cavelist splitting up by kataster area etc. 2011-07-11 00:03:36 +01:00
Martin Green
a128401d49 Added parsing of all.svx, alongside parsing individual caves.
Added the making and parsing of all.pos to determine the location of stations.
More work is required so the caves are parsed and stored only once.
Survex parsing appears to include bugs, that print out errors.
2011-07-11 00:01:12 +01:00
Martin Green
5075ded032 Removed modelforms for Caves started to add normal forms 2011-07-10 23:57:31 +01:00
Martin Green
47c2e87979 Removed SurveyStation model (not SurvexStation) 2011-07-10 23:55:54 +01:00
Martin Green
53352e7987 Added THREEDTOPOS setting for survexs 3dtopos program 2011-07-10 23:53:32 +01:00
Martin Green
44f86a7d6f Added url to cave and turned entrances station names and removed the previous SurveyStation model.
Note caves should be rendered in the directory of their original url to make links work.
Note SurveyStations appeared to duplicate SurvexStations.
Note Given we want to be running from a mercurial repository, it is easiest to store the names of survey stations rather than foreign keys.
2011-07-10 23:52:18 +01:00
Martin Green
c37124d9c4 Add ability to views caves via their cave slug. Not recommended until links are fixed. 2011-07-10 23:48:13 +01:00
Martin Green
69ab1e0249 Changed to regex to make 2003 expo logbooks parse 2011-07-10 23:45:45 +01:00
Martin Green
2fd8052ac2 Added redmund style for jquery-ui 2011-07-10 23:40:52 +01:00
Wookey
28924db9f8 merge fix from martin's tip. 2011-07-10 23:30:36 +01:00
Martin Green
50545af223 Added editing of flat pages. Added slugfields to models to refer to them. 2011-06-02 19:16:16 +01:00
expo
30829ff9c8 debug 2011-05-02 03:25:43 +01:00
Martin Green
ede9e4a9bd debug 2011-05-02 03:23:59 +01:00
Martin Green
04d0e80430 debug 2011-05-02 03:22:45 +01:00
Martin Green
366d4736ca Try to fake csrf tags so site works on django 1.1 2011-05-02 03:20:31 +01:00
Martin Green
f3391a912e Attempt to get CSRF tag not breaking django 1.1 2011-05-02 03:13:54 +01:00
Martin Green
52eb4030d0 Attempt to get csrf tag working in django 1.1- 2011-05-02 03:11:17 +01:00
Martin Green
835680f0ee Get CSRF middleware to work on django 1.1- and 1.2+ 2011-05-02 02:51:14 +01:00
Martin Green
cdf54e0f9b Added ability to host website not at the root, eg. http://m.com/troggle/ 2011-05-02 02:37:33 +01:00
Martin Green
b439d40120 Debugging, and make get_name function accessible (should really be renamed) 2011-05-02 02:15:54 +01:00
Martin Green
cb744ddeef CSRF protection 2011-05-02 02:14:15 +01:00
Martin Green
872ffe5882 decorator to check if user is logged in if settings.PUBLIC_SITE 2011-05-02 02:13:27 +01:00
Martin Green
671e946c6d settings.PUBLIC_SITE, login required if public for logbook entry, CSRF middleware 2011-05-02 02:12:26 +01:00
Martin Green
3928609c29 Bug fix to expedition links 2011-05-02 00:56:53 +01:00
Martin Green
e942c839a1 Link to expowebsite 2011-05-02 00:53:44 +01:00
Martin Green
bff34aafb9 FIX2 2011-05-01 23:21:47 +01:00
Martin Green
7623943f3e Fix 2011-05-01 23:11:18 +01:00
Martin Green
6d7691791a Added settings hooks for TinyMCE. On debian apt-get install tinymce python-django-tinymce 2011-05-01 19:58:38 +01:00
Martin Green
b001df1f53 edit logbooks, new logbook format, increased database normalisation 2011-05-01 19:32:41 +01:00
Martin Green
1cc7f2d92e Allow survey scans to be scrapped with a file in the top level directory of the year 2011-05-01 19:20:25 +01:00
Martin Green
7a0a898bc6 Added variables to configure TinyMCE 2011-05-01 19:17:57 +01:00
Martin Green
41aca4e2d7 Added files for jQuery to allow for UI and dynamic formsets. 2011-05-01 19:15:34 +01:00
Martin Green
7e89b12004 Setup files for hg to ignore (*.pyc, db*, localsettings.py) 2011-05-01 19:13:07 +01:00
Aaron Curtis
7bac9f829e Renaming main branch from 'svn' to 'default' per mercurial convention.
Hopefully this will keep the main branch as the active one, so the Erebus branch is only used if requested.
2009-09-27 00:43:01 -06:00
goatchurch
2435639498 rolled back a bad update 2009-09-14 23:23:09 +01:00
expo
2be3e4ce9d get survey scans into database 2009-09-14 23:09:50 +01:00
goatchurch
1294444026 make 2008 logbook correctly parse 2009-09-14 22:52:46 +01:00
goatchurch
7578b65573 able to save sketches up from tunnel 2009-09-13 17:27:46 +01:00
goatchurch
ced45c92f7 tunnelfiles scheme added 2009-09-11 23:56:47 +01:00
goatchurch
f21cddb2d0 modelviz added 2009-09-11 09:04:59 +01:00
goatchurch
735b729a41 survey scans features added 2009-09-10 22:07:31 +01:00
goatchurch
c5b933f922 parsing 2009-09-08 23:05:04 +01:00
goatchurch
ce6fe2590d login required for saving survex files 2009-08-29 18:35:02 +01:00
goatchurch
7509a76eb0 login required for saving survex files 2009-08-29 18:34:18 +01:00
goatchurch
41eaa06e55 login required for saving survex files 2009-08-29 18:34:01 +01:00
goatchurch
7429749004 login required for saving survex files 2009-08-29 18:33:44 +01:00
goatchurch
709f9954f4 login required for saving survex files 2009-08-29 18:33:28 +01:00
expo
29adaa03c6 get rid of photo 2009-08-29 18:08:55 +01:00
goatchurch
9f169fb2b9 enable admin url 2009-08-29 17:30:07 +01:00
goatchurch
6b8294d9dc remove dependence on latest django 2009-08-29 16:23:11 +01:00
goatchurch
0ea70273fe quick hack to make work in django1.0 Photo to DPhoto 2009-08-23 23:29:05 +01:00
goatchurch
c66b5e2dad [svn] latest hacking for various statistics 2009-08-05 11:58:36 +01:00
goatchurch
9077462893 [svn] now with ability to make new svx file 2009-08-01 07:31:27 +01:00
goatchurch
7158a79a34 [svn] full checkin. animations disabled, sorry 2009-07-27 13:43:43 +01:00
goatchurch
68060d6118 [svn] some file reading things 2009-07-27 13:42:54 +01:00
substantialnoninfringinguser
ddbdc73e7e [svn] fix indexError bug julian found 2009-07-22 16:35:49 +01:00
substantialnoninfringinguser
263b640641 [svn] Various bug fixes, using more raw_id fields in admin so it loads faster. I had to put onLoad="contentHeight();" back into the base template. This is a bad solution, I would rather use Martin's, but it wasn't working. 2009-07-22 16:18:00 +01:00
goatchurch
84ad39f24a [svn] bugged 2009-07-21 07:20:34 +01:00
substantialnoninfringinguser
408a4c79aa [svn] 2009-07-17 01:14:37 +01:00
substantialnoninfringinguser
b9bbccfe00 [svn] * Make descriptions parser also replace links to descriptions from Cave models' underground_descriptions with wikilinks for valid (existing) links
* Make entrances searchable in admin by cave kataster number
2009-07-16 05:37:33 +01:00
substantialnoninfringinguser
05d262e42b [svn] only logged in users should see the tasks page thing 2009-07-15 01:55:26 +01:00
substantialnoninfringinguser
18e61d19f5 [svn] * wikilink to html for subcaves and cave descriptions
* fix header regex
2009-07-12 06:30:24 +01:00
substantialnoninfringinguser
4a073ea161 [svn] Add regex to turn ==headers== into <h2>headers</h2> 2009-07-12 05:54:08 +01:00
substantialnoninfringinguser
2993ca74cc [svn] override save for CaveDescriptions to scan qm wikilinks and add into the manytomany field linked_qms 2009-07-11 01:36:00 +01:00
substantialnoninfringinguser
1566923d5c [svn] Make QM wikilinks work in new format, and fix cave description parser to output working wikilinks. 2009-07-09 05:08:21 +01:00
substantialnoninfringinguser
b0073caf5f [svn] not ready for that yet 2009-07-06 08:35:08 +01:00
substantialnoninfringinguser
8ad044cb2c [svn] * Make QM wikilinks work again
* Add new ajax bit in LogbookEntry admin which checks for QMs not in wikilink format and allows one click fixes. Soon to be expanded to check for wikilinks that aren't in foreignkey.
* Tweaks to admin including using raw_id_fields for PersonExpedition & other foreignkeyed models with lots of instances.
2009-07-06 08:31:24 +01:00
martin speleo
8a9eb32aaf [svn] wiki_to_html changes.
Changes views of qm model.
2009-07-04 19:35:06 +01:00
substantialnoninfringinguser
7f2199405d [svn] 2009-07-04 19:29:19 +01:00
substantialnoninfringinguser
38a545e174 [svn] Remove old subcave model, along with mptt and feincms. Also move OtherCaveNames admin representation to an inline in Cave. 2009-07-04 19:26:51 +01:00
substantialnoninfringinguser
4f0271ad49 [svn] 2009-07-04 18:41:48 +01:00
martin speleo
7fc1602f7a [svn] Initial and poor attempt at a view for cave descriptions. 2009-07-04 18:11:20 +01:00
martin speleo
aa26690e33 [svn] Parser for cave descriptions 2009-07-04 17:19:30 +01:00
martin speleo
09581829d1 [svn] Changed addToArgsSurveyStation such that it does not pass a surveystation model to html_to_wiki, which was unnecessary as html_to_wiki returned it without modification. By removing it html_to_wiki can be cleaned up. 2009-07-04 17:08:48 +01:00
martin speleo
3afb94f5d2 [svn] Work on turning html pages into cavedescription models.py.
Moved parser/cavetabs html_to_wiki function to utils.py
Added databaseReset.py desc to refresh the cavedescriptions.
2009-07-04 16:42:17 +01:00
martin speleo
29f084613d [svn] removed redundant import 2009-07-04 16:39:59 +01:00
substantialnoninfringinguser
dd76a1a0be [svn] * Adding JS fill in next QM number via ajax.
* Slight models cleanup- get rid of TroggleImageModel class, use mixin instead.
* Collect various troggle shared functions into utils.py
2009-07-04 08:27:49 +01:00
martin speleo
c132477f80 [svn] Added cavedescription and new subcave.
Changed parsers/survex to read *title into subcave
2009-07-04 00:28:28 +01:00
martin speleo
92635f6f68 [svn] Change to get js in admin work for feincms 2009-07-04 00:26:12 +01:00
martin speleo
65ef255b99 [svn] Fixed the following of *includes by adding white space to the end of the regex. 2009-07-03 23:56:39 +01:00
substantialnoninfringinguser
854fe85132 [svn] 2009-07-03 21:59:31 +01:00
martin speleo
4da6203828 [svn] Fixed setContentHeight to work properly for eye candy view, whilst removing it from the non-eyecandy view 2009-07-03 21:29:02 +01:00
martin speleo
7db1aae5ee [svn] Remove broken import search 2009-07-03 21:04:28 +01:00
substantialnoninfringinguser
b4388d838e [svn] 2009-07-03 20:49:04 +01:00
substantialnoninfringinguser
8446047ab2 [svn] Brief code cleanup. 2009-07-03 05:31:49 +01:00
substantialnoninfringinguser
dc19150eba [svn] whoops 2009-07-03 00:51:41 +01:00
substantialnoninfringinguser
a89139763f [svn] Use template block "related" for related objects. Various cleanup, fix personexpedition date views. 2009-07-03 00:50:56 +01:00
substantialnoninfringinguser
dab138c731 [svn] More fallout of renaming expo to core. Also fix 2009-07-02 23:02:42 +01:00
substantialnoninfringinguser
205a73917d [svn] Fix leftover from expo -> core rename, and add databaseReset.py to README.txt 2009-07-02 22:31:28 +01:00
substantialnoninfringinguser
ae3fe8cd42 [svn] Renaming troggle.expo to troggle.core. To do this, used:
perl -p -i -e "s/expo(?=[\s\.']+)/core/g" `find -name \*.py`

and then manually checked each change (had to remove a couple)
2009-07-02 20:43:18 +01:00
substantialnoninfringinguser
c0b274767b [svn] Add photos wiki syntaxes: e.g.
[[display:centre photo:andyc.jpg]] where centre is a class applied to image, and andyc.jpg is the filename of a Photo model instance. Image will be displayed as thumbnail with link to full size image.
[[photo:andyc.jpg]] will produce a link to the admin page for the andyc.jpg Photo model instance.
[[photo:andyc.jpg Title of the link]] will produce a link to the admin page for the andyc.jpg Photo model instance with link text "Title of the link"
2009-07-02 04:10:51 +01:00
martin speleo
620040bde1 [svn] Fixed accidental removal of fading in margin pictures from main page of eye candy site.
Reduced non eye candy margins.
Moved set contents style height function into main.js from being embedded js, and ran when eye candy is turned on.  Remove style attribute when eye candy is turned back off.
2009-06-28 23:11:45 +01:00
martin speleo
22aa9990a5 [svn] Have different css for plain and eye candy views. 2009-06-28 22:23:56 +01:00
goatchurch
16b7404d9b [svn] horrid .svns copied accidentally 2009-06-28 21:26:35 +01:00
goatchurch
db5e315db0 [svn] forgot to add directory 2009-06-28 21:22:16 +01:00
goatchurch
4c87ce59d3 [svn] with command option 2009-06-28 20:47:11 +01:00
martin speleo
ca7bc171c9 [svn] Fixed small semantics issues stopping base.js working with IE.
Made toggle eyecandy persistent (using a cookie)
Made toggle eyecandy turn off footer menu images
Only load footer menu images if the eyecandy is being used.
2009-06-28 19:33:24 +01:00
substantialnoninfringinguser
b55b17ccc1 [svn] Make header scroll with page because Julian said so 2009-06-19 15:38:32 +01:00
substantialnoninfringinguser
59830c80af [svn] Add readme with installation instructions. 2009-06-19 07:02:25 +01:00
substantialnoninfringinguser
b4a63eca02 [svn] Adding logbook export features. Troggle can now produce .txt or .html logbooks through the controlPanel or via an action in the LogbookEntry admin pages. 2009-06-18 06:53:52 +01:00
substantialnoninfringinguser
0306723c95 [svn] Whoops, forgot to add the file in last revision. 2009-06-14 04:36:19 +01:00
substantialnoninfringinguser
af9743026e [svn] Added beginnings of subcaves parser. This required importing more information from cavetab, namely the location where the main cave page appeared on the old expo website. 2009-06-14 04:33:19 +01:00
substantialnoninfringinguser
9b44731c33 [svn] * Fix bugs that were causing broken wikilinks. *Add edit link to mugshots. *make admin url trailing-slash tolerant 2009-06-12 05:39:30 +01:00
substantialnoninfringinguser
5946e159bc [svn] Just realized it makes no sense to have qms ticked off by a logbook entry as an inline. Instead, we need some kind of drop down list where ticked off qms can be searched for and selected. Should be doable. 2009-06-11 06:37:07 +01:00
substantialnoninfringinguser
327ea9cacf [svn] Edited wiki page through web user interface. 2009-06-11 06:35:18 +01:00
substantialnoninfringinguser
6d6991e266 [svn] Added detection of noinfo in cave parser. It sets the non_public flag to true, and the view then shows nonpublic.html instead of the cave if the user isn't logged in. 2009-06-10 17:47:05 +01:00
substantialnoninfringinguser
e4ea57932e [svn] Whoops, forgot the template during last commit. 2009-06-10 06:37:38 +01:00
substantialnoninfringinguser
484a17d496 [svn] * Added non-public field for protecting copyright info etc. Field is on all models but needs to be checked for in views. So far, only the cave view checks.
* Added the Person wiki syntax which looks like [[person:John Doe]]
2009-06-10 06:34:50 +01:00
substantialnoninfringinguser
1d421b2d7c [svn] Fixed a bug with QMs with numbers between 1 and 10, and fixed the links in the recent changes box. 2009-06-10 05:37:53 +01:00
substantialnoninfringinguser
4ce282b88b [svn] Created wiki page through web user interface. 2009-06-10 00:22:29 +01:00
substantialnoninfringinguser
85ada36973 [svn] * Added admin inlines for QMs in LogbookEntry model
* Added QM list edit view
* Fixed "recent changes" box on front page
2009-06-10 00:05:02 +01:00
substantialnoninfringinguser
a3e42d3b19 [svn] 2009-06-09 23:13:11 +01:00
goatchurch
542f55d43e [svn] backup settings 2009-06-09 19:52:32 +01:00
goatchurch
d87f221a2b [svn] fix the revert and css 2009-06-09 19:15:31 +01:00
goatchurch
6237a19d17 [svn] the ajax page 2009-06-09 19:13:48 +01:00
goatchurch
17175637dc [svn] codemirror 2009-06-09 18:59:54 +01:00
substantialnoninfringinguser
32b5c7fbb0 [svn] fix logfile setting 2009-06-09 18:20:55 +01:00
substantialnoninfringinguser
ef47d092e6 [svn] Edited wiki page through web user interface. 2009-06-09 02:29:21 +01:00
substantialnoninfringinguser
8648c85b67 [svn] Edited wiki page through web user interface. 2009-06-09 02:21:30 +01:00
substantialnoninfringinguser
657c37d45c [svn] Created wiki page through web user interface. Lost the goddamn thing twice now due to browser crash and stupid back button so it's not done but I'm saving it anyway! 2009-06-09 02:06:13 +01:00
substantialnoninfringinguser
006becf6ca [svn] Removed redundant fields "date" and "place" from Persontrip model. A PersonTrip's date and place are stored in its parent LogbookEntry. PersonTrips are the people who participate in the trip in a LogbookEntry, so it would make no sense to have different dates and places from the LogbookEntry they are foreignkeyed to. 2009-06-09 00:29:00 +01:00
substantialnoninfringinguser
012d948193 [svn] Rewrote get_absolute_url methods of models to use urlparse.urljoin instead of just +ing the urls together. This fixes problems with double slashes. 2009-06-08 20:16:18 +01:00
pjrharley
a048adcdac [svn] A few registration updates
-display an error for nonmatching passwords
-display an error for short passwords
-dont direct people to http://http://sitename....
2009-05-30 16:17:19 +01:00
substantialnoninfringinguser
b091e8eb09 [svn] Have control panel display an error for logged in, non-superuser users. 2009-05-24 23:24:59 +01:00
pjrharley
14b39d906c [svn] Use the django compatability thing - webserver might have old python on it.... 2009-05-23 21:13:53 +01:00
substantialnoninfringinguser
0508ba299c [svn] Fix mistakes in export admin actions. The python serializer only works on simple objects (lists, dicts etc) and not model instances so nix that part. 2009-05-23 20:46:10 +01:00
substantialnoninfringinguser
02db5a9170 [svn] Re-enable JSON and XML export actions in admin pages now that troggle is using latest SVN version of Django. 2009-05-23 20:37:42 +01:00
substantialnoninfringinguser
93a68ff43e [svn] Fix broken admin link. 2009-05-23 20:06:05 +01:00
substantialnoninfringinguser
97e423ba86 [svn] fix imports 2009-05-23 16:51:21 +01:00
substantialnoninfringinguser
3033f1eecd [svn] Created wiki page through web user interface. 2009-05-22 22:38:41 +01:00
pjrharley
f4405a16f1 [svn] Dont say activation failed if it didn't\! 2009-05-22 21:02:48 +01:00
pjrharley
025b743070 [svn] Accidentally commited another change... so might as well add the template to go with it. Send activation email as text and html so the link is clickable 2009-05-22 21:02:24 +01:00
pjrharley
e27f5565cb [svn] Use hashlib rather than depreciated sha 2009-05-22 20:59:03 +01:00
substantialnoninfringinguser
7fe5cd6ede [svn] Edited wiki page through web user interface. 2009-05-22 08:17:17 +01:00
substantialnoninfringinguser
7052355596 [svn] Edited wiki page through web user interface. 2009-05-22 07:59:37 +01:00
substantialnoninfringinguser
1e6d1a9f2f [svn] Created wiki page through web user interface. 2009-05-22 07:58:58 +01:00
substantialnoninfringinguser
a776c6ba13 [svn] Created wiki page through web user interface. 2009-05-22 07:47:11 +01:00
substantialnoninfringinguser
75f782ab71 [svn] more survey binder updates 2009-05-22 06:49:13 +01:00
substantialnoninfringinguser
832f56a6d0 [svn] fix wrongly named template tags 2009-05-22 06:43:25 +01:00
substantialnoninfringinguser
f6d3a7c84e [svn] switched from dodgy manually writing to logfile to using python's logging module, which seems great 2009-05-22 06:17:24 +01:00
substantialnoninfringinguser
7769a35f07 [svn] - Remove feature (admin JSON / XML downloads) which won't work until we have django 1.1 installed (works on my SVN version, but not on seagrass debian package version).
- Copy feincms media to project so that we don't have to serve it separately. Also useful because we may want to customize it.
2009-05-22 02:54:09 +01:00
substantialnoninfringinguser
c38dfd20a1 [svn] * Make subcave urls work.
* Add json and xml download to admin.
2009-05-22 01:50:16 +01:00
substantialnoninfringinguser
83634fe95a [svn] minor logfile mistake 2009-05-21 22:55:08 +01:00
substantialnoninfringinguser
e336e9c770 [svn] allow the recreate tables thing on control panel to work 2009-05-21 20:46:24 +01:00
substantialnoninfringinguser
3ac1169aa7 [svn] fix minor logfile error 2009-05-21 20:24:21 +01:00
substantialnoninfringinguser
3d8a6fb55a [svn] 2009-05-21 20:17:07 +01:00
substantialnoninfringinguser
891b3abb44 [svn] Updates to allow subcave tree with nice admin. 2009-05-21 19:47:19 +01:00
substantialnoninfringinguser
01b0980c44 [svn] forgot to add earlier 2009-05-20 03:28:48 +01:00
substantialnoninfringinguser
2c2f11be39 [svn] 2009-05-19 06:32:42 +01:00
substantialnoninfringinguser
d71078d03d [svn] 2009-05-18 04:30:26 +01:00
substantialnoninfringinguser
12009e36df [svn] Turn main menu into dropdown (well actually, drop up) menu. 2009-05-18 04:25:42 +01:00
substantialnoninfringinguser
21c39f70de [svn] - Make control panel downloads (qm.csv for each cave, CAVETAB2.CSV) work.
- Fix problems in QM parsing script
2009-05-17 04:31:23 +01:00
substantialnoninfringinguser
7566faf77b [svn] Make the workaround to avoid parsing interlaced pngs actually work (see issue # 14) 2009-05-15 03:56:11 +01:00
substantialnoninfringinguser
f27d5988f0 [svn] semi ugly hack... 2009-05-15 03:38:11 +01:00
substantialnoninfringinguser
d8a215a575 [svn] Add: new generic object list template object_list.html, and convenience filter named "link" for making links from objects, and make expeditions list page using those two. Also, fixed survey parsing in databaseReset.py 2009-05-15 03:29:19 +01:00
substantialnoninfringinguser
118d132797 [svn] Forgot to upload with earlier commit 2009-05-14 14:24:46 +01:00
substantialnoninfringinguser
06487e5534 [svn] localsettings should override settings, so the import should be at the bottom of the file, unless someone has a better way of doing this 2009-05-14 06:39:36 +01:00
substantialnoninfringinguser
c0b73d4777 [svn] 2009-05-14 06:32:58 +01:00
substantialnoninfringinguser
e9e755b517 [svn] Fixed broken buttons on controlpanel, added CAVETAB2.CSV export and download buttons and made them work too.
Changed ordering on PersonExpeditions so that it is based on their expedition. That way, even if we don't have date info on when a user was on expo exactly, pages like personindex work correctly.
2009-05-14 06:19:46 +01:00
substantialnoninfringinguser
191619e6d8 [svn] Add link to google code issue tracker 2009-05-13 07:01:45 +01:00
substantialnoninfringinguser
0f64e786b5 [svn] Made the subcaves work! Now we just have to figure out how to parse them...
Copied from http://cucc@cucc.survex.com/svn/trunk/expoweb/troggle/, rev. 8343 by cucc @ 5/11/2009 6:36 AM
2009-05-13 06:28:36 +01:00
substantialnoninfringinguser
7164296c9d [svn]
Copied from http://cucc@cucc.survex.com/svn/trunk/expoweb/troggle/, rev. 8342 by cucc @ 5/11/2009 3:23 AM
2009-05-13 06:27:45 +01:00
substantialnoninfringinguser
787445c071 [svn]
Copied from http://cucc@cucc.survex.com/svn/trunk/expoweb/troggle/, rev. 8341 by cucc @ 5/11/2009 3:21 AM
2009-05-13 06:27:00 +01:00
substantialnoninfringinguser
d9d119c0c9 [svn] django-evolution is optional so shouldn't be in main settings
Copied from http://cucc@cucc.survex.com/svn/trunk/expoweb/troggle/, rev. 8340 by cucc @ 5/11/2009 3:18 AM
2009-05-13 06:26:17 +01:00
substantialnoninfringinguser
c45eb31e8f [svn] Switch from photologue to imagekit. Less bloat.
Copied from http://cucc@cucc.survex.com/svn/trunk/expoweb/troggle/, rev. 8338 by cucc @ 5/11/2009 3:08 AM
2009-05-13 06:24:52 +01:00
substantialnoninfringinguser
b31d022c1a [svn] Dynamic thumbnail generation for photos and survey scans using imagekit, further improving registration system, other misc.
Copied from http://cucc@cucc.survex.com/svn/trunk/expoweb/troggle/, rev. 8336 by cucc @ 5/10/2009 11:05 PM
2009-05-13 06:23:57 +01:00
substantialnoninfringinguser
919c7e932a [svn] Fixes to deal with reorganization of expo surveys repository. Now that survey scans and Surveys.csv are in different directories, we have two settings variables, settings.SURVEYS for the root of the survey repo, and settings.SURVEY_SCANS for the surveyscans directory.
Fixed tab / indent muck in surveys parser. Commented out some "file abstraction" stuff for the time being.
Copied from http://cucc@cucc.survex.com/svn/trunk/expoweb/troggle/, rev. 8335 by cucc @ 5/10/2009 7:26 AM
2009-05-13 06:22:53 +01:00
substantialnoninfringinguser
9489fe56d9 [svn] Improve registration system.
Add jquery fade effects and quick search.
Copied from http://cucc@cucc.survex.com/svn/trunk/expoweb/troggle/, rev. 8334 by cucc @ 5/10/2009 5:23 AM
2009-05-13 06:22:07 +01:00
87 changed files with 2970 additions and 4394 deletions

.gitignore (new file, 16 lines added)

@@ -0,0 +1,16 @@
# use glob syntax
syntax: glob
*.pyc
db*
localsettings.py
*~
parsing_log.txt
troggle
troggle_log.txt
.idea/*
*.orig
media/images/*
.vscode/*
.swp
imagekit-off/


@@ -1,13 +1,13 @@
 Troggle is an application for caving expedition data management, originally created for use on Cambridge University Caving Club (CUCC)expeditions and licensed under the GNU Lesser General Public License.
-Troggle has been forked into two projects. The original one is maintained by Aron Curtis and is used for Erebus caves. The CUCC variant uses files as the definitive data, not the database and lives at expo.sruvex.com/troggle.
+Troggle has been forked into two projects. The original one is maintained by Aron Curtis and is used for Erebus caves. The CUCC variant uses files as the definitive data, not the database and lives at expo.survex.com/troggle.
 Troggle setup
 ==========
 Python, Django, and Database setup
 -----------------------------------
-Troggle requires Django 1.4 or greater, and any version of Python that works with it.
+Troggle requires Django 1.10, and Python 2.7.
 Install Django with the following command:
 apt-get install python-django (on debian/ubuntu)
@@ -18,12 +18,18 @@ If you want to use MySQL or Postgresql, download and install them. However, you
 Troggle itself
 -------------
-Choose a directory where you will keep troggle, and svn check out Troggle into it using the following command:
-svn co http://troggle.googlecode.com/svn/
+Choose a directory where you will keep troggle, and git clone Troggle into it using the following command:
+git clone git://expo.survex.com/~/troggle
+or more reliably
+git clone ssh://expo@expo.survex.com/home/expo/troggle
+Running in development
+----------------------
+The simplest way to run Troggle in development is through the docker-compose setup
+See the docker folder in the repo for details
-If you want to work on the source code and be able to commit, you will need to use https instead of http, and your google account will need to be added to the troggle project members list. Contact aaron dot curtis at cantab dot net to get this set up.
+If you want to work on the source code and be able to commit, your account will need to be added to the troggle project members list. Contact wookey at wookware dot org to get this set up.
 Next, you need to fill in your local settings. Copy either localsettingsubuntu.py or localsettingsserver.py to a new file called localsettings.py. Follow the instructions contained in the file to fill out your settings.
@@ -35,7 +41,7 @@ Run "python databaseReset.py reset" from the troggle directory.
 Once troggle is running, you can also log in and then go to "Import / export" data under "admin" on the menu.
 Adding a new year/expedition requires adding a column to the
-noinfo/folk.csv table - a year doesn't exist until that is done.
+folk/folk.csv table - a year doesn't exist until that is done.
 Running a Troggle server
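The instructions above have localsettings.py override the project defaults, and a commit earlier in this log notes that the localsettings import belongs at the bottom of settings.py so the local values win. A minimal sketch of that pattern, with illustrative setting values rather than troggle's real defaults:

    # settings.py (tail) -- sketch only; localsettings.py is machine-specific
    # and deliberately kept out of git (it appears in .gitignore above).
    DEBUG = False                          # project-wide default
    MEDIA_ROOT = "/expo/expoweb/photos"    # illustrative default path

    # Imported last so a copy of localsettingsubuntu.py / localsettingsserver.py
    # renamed to localsettings.py can override anything defined above.
    from localsettings import *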


@@ -9,12 +9,12 @@ from troggle.core.views_other import downloadLogbook
class TroggleModelAdmin(admin.ModelAdmin): class TroggleModelAdmin(admin.ModelAdmin):
def save_model(self, request, obj, form, change): def save_model(self, request, obj, form, change):
"""overriding admin save to fill the new_since parsing_field""" """overriding admin save to fill the new_since parsing_field"""
obj.new_since_parsing=True obj.new_since_parsing=True
obj.save() obj.save()
class Media: class Media:
js = ('jquery/jquery.min.js','js/QM_helper.js') js = ('jquery/jquery.min.js','js/QM_helper.js')
@@ -28,6 +28,10 @@ class SurvexBlockAdmin(TroggleModelAdmin):
inlines = (RoleInline,) inlines = (RoleInline,)
class SurvexStationAdmin(TroggleModelAdmin):
search_fields = ('name', 'block__name')
class ScannedImageInline(admin.TabularInline): class ScannedImageInline(admin.TabularInline):
model = ScannedImage model = ScannedImage
extra = 4 extra = 4
@@ -40,7 +44,7 @@ class OtherCaveInline(admin.TabularInline):
class SurveyAdmin(TroggleModelAdmin): class SurveyAdmin(TroggleModelAdmin):
inlines = (ScannedImageInline,) inlines = (ScannedImageInline,)
search_fields = ('expedition__year','wallet_number') search_fields = ('expedition__year','wallet_number')
class QMsFoundInline(admin.TabularInline): class QMsFoundInline(admin.TabularInline):
@@ -48,7 +52,7 @@ class QMsFoundInline(admin.TabularInline):
fk_name='found_by' fk_name='found_by'
fields=('number','grade','location_description','comment')#need to add foreignkey to cave part fields=('number','grade','location_description','comment')#need to add foreignkey to cave part
extra=1 extra=1
class PhotoInline(admin.TabularInline): class PhotoInline(admin.TabularInline):
model = DPhoto model = DPhoto
@@ -64,7 +68,7 @@ class PersonTripInline(admin.TabularInline):
#class LogbookEntryAdmin(VersionAdmin): #class LogbookEntryAdmin(VersionAdmin):
class LogbookEntryAdmin(TroggleModelAdmin): class LogbookEntryAdmin(TroggleModelAdmin):
prepopulated_fields = {'slug':("title",)} prepopulated_fields = {'slug':("title",)}
search_fields = ('title','expedition__year') search_fields = ('title','expedition__year')
date_heirarchy = ('date') date_heirarchy = ('date')
inlines = (PersonTripInline, PhotoInline, QMsFoundInline) inlines = (PersonTripInline, PhotoInline, QMsFoundInline)
@@ -73,12 +77,12 @@ class LogbookEntryAdmin(TroggleModelAdmin):
"all": ("css/troggleadmin.css",) "all": ("css/troggleadmin.css",)
} }
actions=('export_logbook_entries_as_html','export_logbook_entries_as_txt') actions=('export_logbook_entries_as_html','export_logbook_entries_as_txt')
def export_logbook_entries_as_html(modeladmin, request, queryset): def export_logbook_entries_as_html(self, modeladmin, request, queryset):
response=downloadLogbook(request=request, queryset=queryset, extension='html') response=downloadLogbook(request=request, queryset=queryset, extension='html')
return response return response
def export_logbook_entries_as_txt(modeladmin, request, queryset): def export_logbook_entries_as_txt(self, modeladmin, request, queryset):
response=downloadLogbook(request=request, queryset=queryset, extension='txt') response=downloadLogbook(request=request, queryset=queryset, extension='txt')
return response return response
@@ -95,11 +99,11 @@ class PersonAdmin(TroggleModelAdmin):
class QMAdmin(TroggleModelAdmin): class QMAdmin(TroggleModelAdmin):
search_fields = ('found_by__cave__kataster_number','number','found_by__date') search_fields = ('found_by__cave__kataster_number','number','found_by__date')
list_display = ('__unicode__','grade','found_by','ticked_off_by') list_display = ('__unicode__','grade','found_by','ticked_off_by','nearest_station')
list_display_links = ('__unicode__',) list_display_links = ('__unicode__',)
list_editable = ('found_by','ticked_off_by','grade') list_editable = ('found_by','ticked_off_by','grade','nearest_station')
list_per_page = 20 list_per_page = 20
raw_id_fields=('found_by','ticked_off_by') raw_id_fields=('found_by','ticked_off_by','nearest_station')
class PersonExpeditionAdmin(TroggleModelAdmin): class PersonExpeditionAdmin(TroggleModelAdmin):
@@ -118,37 +122,41 @@ class EntranceAdmin(TroggleModelAdmin):
admin.site.register(DPhoto) admin.site.register(DPhoto)
admin.site.register(Cave, CaveAdmin) admin.site.register(Cave, CaveAdmin)
admin.site.register(CaveSlug)
admin.site.register(Area) admin.site.register(Area)
#admin.site.register(OtherCaveName) #admin.site.register(OtherCaveName)
admin.site.register(CaveAndEntrance) admin.site.register(CaveAndEntrance)
admin.site.register(NewSubCave) admin.site.register(NewSubCave)
admin.site.register(CaveDescription) admin.site.register(CaveDescription)
admin.site.register(Entrance, EntranceAdmin) admin.site.register(Entrance, EntranceAdmin)
admin.site.register(SurvexBlock, SurvexBlockAdmin)
admin.site.register(Expedition) admin.site.register(Expedition)
admin.site.register(Person,PersonAdmin) admin.site.register(Person,PersonAdmin)
admin.site.register(SurvexPersonRole)
admin.site.register(PersonExpedition,PersonExpeditionAdmin) admin.site.register(PersonExpedition,PersonExpeditionAdmin)
admin.site.register(LogbookEntry, LogbookEntryAdmin) admin.site.register(LogbookEntry, LogbookEntryAdmin)
#admin.site.register(PersonTrip) #admin.site.register(PersonTrip)
admin.site.register(QM, QMAdmin) admin.site.register(QM, QMAdmin)
admin.site.register(Survey, SurveyAdmin) admin.site.register(Survey, SurveyAdmin)
admin.site.register(ScannedImage) admin.site.register(ScannedImage)
admin.site.register(SurvexStation)
admin.site.register(SurvexDirectory)
admin.site.register(SurvexFile)
admin.site.register(SurvexStation, SurvexStationAdmin)
admin.site.register(SurvexBlock)
admin.site.register(SurvexPersonRole)
admin.site.register(SurvexScansFolder) admin.site.register(SurvexScansFolder)
admin.site.register(SurvexScanSingle) admin.site.register(SurvexScanSingle)
admin.site.register(DataIssue)
def export_as_json(modeladmin, request, queryset): def export_as_json(modeladmin, request, queryset):
response = HttpResponse(mimetype="text/json") response = HttpResponse(content_type="text/json")
response['Content-Disposition'] = 'attachment; filename=troggle_output.json' response['Content-Disposition'] = 'attachment; filename=troggle_output.json'
serializers.serialize("json", queryset, stream=response) serializers.serialize("json", queryset, stream=response)
return response return response
def export_as_xml(modeladmin, request, queryset): def export_as_xml(modeladmin, request, queryset):
response = HttpResponse(mimetype="text/xml") response = HttpResponse(content_type="text/xml")
response['Content-Disposition'] = 'attachment; filename=troggle_output.xml' response['Content-Disposition'] = 'attachment; filename=troggle_output.xml'
serializers.serialize("xml", queryset, stream=response) serializers.serialize("xml", queryset, stream=response)
return response return response
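Two of the admin changes above are straight Django compatibility fixes: HttpResponse lost its mimetype keyword (it is content_type from Django 1.7 onwards), and the logbook export actions are being moved onto the ModelAdmin. For reference, stock Django expects an action defined as a ModelAdmin method to take (self, request, queryset); a minimal sketch with an illustrative model admin:

    from django.contrib import admin
    from django.core import serializers
    from django.http import HttpResponse

    class ThingAdmin(admin.ModelAdmin):          # illustrative ModelAdmin
        actions = ("export_as_json",)

        def export_as_json(self, request, queryset):
            # content_type, not the removed mimetype keyword
            response = HttpResponse(content_type="text/json")
            response["Content-Disposition"] = "attachment; filename=export.json"
            serializers.serialize("json", queryset, stream=response)
            return response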


@@ -15,7 +15,7 @@ def listdir(*path):
for p in os.listdir(root): for p in os.listdir(root):
if os.path.isdir(os.path.join(root, p)): if os.path.isdir(os.path.join(root, p)):
l += p + "/\n" l += p + "/\n"
elif os.path.isfile(os.path.join(root, p)): elif os.path.isfile(os.path.join(root, p)):
l += p + "\n" l += p + "\n"
#Ignore non-files and non-directories #Ignore non-files and non-directories
@@ -28,7 +28,7 @@ def listdir(*path):
c = c.replace("#", "%23") c = c.replace("#", "%23")
print("FILE: ", settings.FILES + "listdir/" + c) print("FILE: ", settings.FILES + "listdir/" + c)
return urllib.urlopen(settings.FILES + "listdir/" + c).read() return urllib.urlopen(settings.FILES + "listdir/" + c).read()
def dirsAsList(*path): def dirsAsList(*path):
return [d for d in listdir(*path).split("\n") if len(d) > 0 and d[-1] == "/"] return [d for d in listdir(*path).split("\n") if len(d) > 0 and d[-1] == "/"]


@@ -16,7 +16,7 @@ class CaveForm(ModelForm):
underground_centre_line = forms.CharField(required = False, widget=forms.Textarea()) underground_centre_line = forms.CharField(required = False, widget=forms.Textarea())
notes = forms.CharField(required = False, widget=forms.Textarea()) notes = forms.CharField(required = False, widget=forms.Textarea())
references = forms.CharField(required = False, widget=forms.Textarea()) references = forms.CharField(required = False, widget=forms.Textarea())
url = forms.CharField(required = True) url = forms.CharField(required = True)
class Meta: class Meta:
model = Cave model = Cave
exclude = ("filename",) exclude = ("filename",)
@@ -24,9 +24,9 @@ class CaveForm(ModelForm):
def clean(self): def clean(self):
if self.cleaned_data.get("kataster_number") == "" and self.cleaned_data.get("unofficial_number") == "": if self.cleaned_data.get("kataster_number") == "" and self.cleaned_data.get("unofficial_number") == "":
self._errors["unofficial_number"] = self.error_class(["Either the kataster or unoffical number is required."]) self._errors["unofficial_number"] = self.error_class(["Either the kataster or unoffical number is required."])
if self.cleaned_data.get("kataster_number") != "" and self.cleaned_data.get("official_name") == "": if self.cleaned_data.get("kataster_number") != "" and self.cleaned_data.get("official_name") == "":
self._errors["official_name"] = self.error_class(["This field is required when there is a kataster number."]) self._errors["official_name"] = self.error_class(["This field is required when there is a kataster number."])
if self.cleaned_data.get("area") == []: if self.cleaned_data.get("area") == []:
self._errors["area"] = self.error_class(["This field is required."]) self._errors["area"] = self.error_class(["This field is required."])
if self.cleaned_data.get("url") and self.cleaned_data.get("url").startswith("/"): if self.cleaned_data.get("url") and self.cleaned_data.get("url").startswith("/"):
@@ -46,12 +46,12 @@ class EntranceForm(ModelForm):
#underground_centre_line = forms.CharField(required = False, widget=TinyMCE(attrs={'cols': 80, 'rows': 10})) #underground_centre_line = forms.CharField(required = False, widget=TinyMCE(attrs={'cols': 80, 'rows': 10}))
#notes = forms.CharField(required = False, widget=TinyMCE(attrs={'cols': 80, 'rows': 10})) #notes = forms.CharField(required = False, widget=TinyMCE(attrs={'cols': 80, 'rows': 10}))
#references = forms.CharField(required = False, widget=TinyMCE(attrs={'cols': 80, 'rows': 10})) #references = forms.CharField(required = False, widget=TinyMCE(attrs={'cols': 80, 'rows': 10}))
other_station = forms.CharField(required=False) # Trying to change this to a singl;e line entry other_station = forms.CharField(required=False) # Trying to change this to a single line entry
tag_station = forms.CharField(required=False) # Trying to change this to a singl;e line entry tag_station = forms.CharField(required=False) # Trying to change this to a single line entry
exact_station = forms.CharField(required=False) # Trying to change this to a singl;e line entry exact_station = forms.CharField(required=False) # Trying to change this to a single line entry
northing = forms.CharField(required=False) # Trying to change this to a singl;e line entry northing = forms.CharField(required=False) # Trying to change this to a single line entry
easting = forms.CharField(required=False) # Trying to change this to a singl;e line entry easting = forms.CharField(required=False) # Trying to change this to a single line entry
alt = forms.CharField(required=False) # Trying to change this to a singl;e line entry alt = forms.CharField(required=False) # Trying to change this to a single line entry
class Meta: class Meta:
model = Entrance model = Entrance
exclude = ("cached_primary_slug", "filename",) exclude = ("cached_primary_slug", "filename",)
@@ -82,11 +82,11 @@ class EntranceLetterForm(ModelForm):
# This function returns html-formatted paragraphs for each of the # This function returns html-formatted paragraphs for each of the
# wikilink types that are related to this logbookentry. Each paragraph # wikilink types that are related to this logbookentry. Each paragraph
# contains a list of all of the related wikilinks. # contains a list of all of the related wikilinks.
# #
# Perhaps an admin javascript solution would be better. # Perhaps an admin javascript solution would be better.
# """ # """
# res = ["Please use the following wikilinks, which are related to this logbook entry:"] # res = ["Please use the following wikilinks, which are related to this logbook entry:"]
# #
# res.append(r'</p><p style="float: left;"><b>QMs found:</b>') # res.append(r'</p><p style="float: left;"><b>QMs found:</b>')
# for QM in LogbookEntry.instance.QMs_found.all(): # for QM in LogbookEntry.instance.QMs_found.all():
# res.append(QM.wiki_link()) # res.append(QM.wiki_link())
@@ -94,12 +94,12 @@ class EntranceLetterForm(ModelForm):
# res.append(r'</p><p style="float: left;"><b>QMs ticked off:</b>') # res.append(r'</p><p style="float: left;"><b>QMs ticked off:</b>')
# for QM in LogbookEntry.instance.QMs_ticked_off.all(): # for QM in LogbookEntry.instance.QMs_ticked_off.all():
# res.append(QM.wiki_link()) # res.append(QM.wiki_link())
# res.append(r'</p><p style="float: left; "><b>People</b>') # res.append(r'</p><p style="float: left; "><b>People</b>')
# for persontrip in LogbookEntry.instance.persontrip_set.all(): # for persontrip in LogbookEntry.instance.persontrip_set.all():
# res.append(persontrip.wiki_link()) # res.append(persontrip.wiki_link())
# res.append(r'</p>') # res.append(r'</p>')
# return string.join(res, r'<br />') # return string.join(res, r'<br />')
# def __init__(self, *args, **kwargs): # def __init__(self, *args, **kwargs):
@@ -107,7 +107,7 @@ class EntranceLetterForm(ModelForm):
# self.fields['text'].help_text=self.wikiLinkHints()# # self.fields['text'].help_text=self.wikiLinkHints()#
#class CaveForm(forms.Form): #class CaveForm(forms.Form):
# html = forms.CharField(widget=TinyMCE(attrs={'cols': 80, 'rows': 30})) # html = forms.CharField(widget=TinyMCE(attrs={'cols': 80, 'rows': 30}))
def getTripForm(expedition): def getTripForm(expedition):
@@ -118,18 +118,18 @@ def getTripForm(expedition):
caves.sort() caves.sort()
caves = ["-----"] + caves caves = ["-----"] + caves
cave = forms.ChoiceField([(c, c) for c in caves], required=False) cave = forms.ChoiceField([(c, c) for c in caves], required=False)
location = forms.CharField(max_length=200, required=False) location = forms.CharField(max_length=200, required=False)
caveOrLocation = forms.ChoiceField([("cave", "Cave"), ("location", "Location")], widget = forms.widgets.RadioSelect()) caveOrLocation = forms.ChoiceField([("cave", "Cave"), ("location", "Location")], widget = forms.widgets.RadioSelect())
html = forms.CharField(widget=TinyMCE(attrs={'cols': 80, 'rows': 30})) html = forms.CharField(widget=TinyMCE(attrs={'cols': 80, 'rows': 30}))
def clean(self): def clean(self):
print dir(self) print(dir(self))
if self.cleaned_data.get("caveOrLocation") == "cave" and not self.cleaned_data.get("cave"): if self.cleaned_data.get("caveOrLocation") == "cave" and not self.cleaned_data.get("cave"):
self._errors["cave"] = self.error_class(["This field is required"]) self._errors["cave"] = self.error_class(["This field is required"])
if self.cleaned_data.get("caveOrLocation") == "location" and not self.cleaned_data.get("location"): if self.cleaned_data.get("caveOrLocation") == "location" and not self.cleaned_data.get("location"):
self._errors["location"] = self.error_class(["This field is required"]) self._errors["location"] = self.error_class(["This field is required"])
return self.cleaned_data return self.cleaned_data
class PersonTripForm(forms.Form): class PersonTripForm(forms.Form):
names = [get_name(pe) for pe in PersonExpedition.objects.filter(expedition = expedition)] names = [get_name(pe) for pe in PersonExpedition.objects.filter(expedition = expedition)]
names.sort() names.sort()
@@ -141,7 +141,7 @@ def getTripForm(expedition):
PersonTripFormSet = formset_factory(PersonTripForm, extra=1) PersonTripFormSet = formset_factory(PersonTripForm, extra=1)
return PersonTripFormSet, TripForm return PersonTripFormSet, TripForm
def get_name(pe): def get_name(pe):
if pe.nickname: if pe.nickname:
return pe.nickname return pe.nickname
@@ -162,18 +162,18 @@ def get_name(pe):
# caves = ["-----"] + caves # caves = ["-----"] + caves
# cave = forms.ChoiceField([(c, c) for c in caves], required=False) # cave = forms.ChoiceField([(c, c) for c in caves], required=False)
# entrance = forms.ChoiceField([("-----", "Please select a cave"), ], required=False) # entrance = forms.ChoiceField([("-----", "Please select a cave"), ], required=False)
# qm = forms.ChoiceField([("-----", "Please select a cave"), ], required=False) # qm = forms.ChoiceField([("-----", "Please select a cave"), ], required=False)
# expeditions = [e.year for e in Expedition.objects.all()] # expeditions = [e.year for e in Expedition.objects.all()]
# expeditions.sort() # expeditions.sort()
# expeditions = ["-----"] + expeditions # expeditions = ["-----"] + expeditions
# expedition = forms.ChoiceField([(e, e) for e in expeditions], required=False) # expedition = forms.ChoiceField([(e, e) for e in expeditions], required=False)
# logbookentry = forms.ChoiceField([("-----", "Please select an expedition"), ], required=False) # logbookentry = forms.ChoiceField([("-----", "Please select an expedition"), ], required=False)
# person = forms.ChoiceField([("-----", "Please select an expedition"), ], required=False)
# person = forms.ChoiceField([("-----", "Please select an expedition"), ], required=False)
# survey_point = forms.CharField() # survey_point = forms.CharField()
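getTripForm() above builds both a TripForm class and a PersonTripFormSet (via formset_factory) at call time, so the cave and person choices match the expedition passed in. A hedged sketch of how a view might consume the returned pair; the view and template names are invented for illustration:

    from django.shortcuts import render

    def new_logbook_entry(request, expedition):
        PersonTripFormSet, TripForm = getTripForm(expedition)  # pair returned above
        if request.method == "POST":
            tripform = TripForm(request.POST)
            peopleformset = PersonTripFormSet(request.POST)
            if tripform.is_valid() and peopleformset.is_valid():
                pass  # create the LogbookEntry and PersonTrip rows here
        else:
            tripform = TripForm()
            peopleformset = PersonTripFormSet()
        return render(request, "newlogbookentry.html",
                      {"tripform": tripform, "peopleformset": peopleformset})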


@@ -1,21 +1,21 @@
from imagekit.specs import ImageSpec from imagekit.specs import ImageSpec
from imagekit import processors from imagekit import processors
class ResizeThumb(processors.Resize): class ResizeThumb(processors.Resize):
width = 100 width = 100
crop = False crop = False
class ResizeDisplay(processors.Resize): class ResizeDisplay(processors.Resize):
width = 600 width = 600
#class EnhanceThumb(processors.Adjustment): #class EnhanceThumb(processors.Adjustment):
#contrast = 1.2 #contrast = 1.2
#sharpness = 2 #sharpness = 2
class Thumbnail(ImageSpec): class Thumbnail(ImageSpec):
access_as = 'thumbnail_image' access_as = 'thumbnail_image'
pre_cache = True pre_cache = True
processors = [ResizeThumb] processors = [ResizeThumb]
class Display(ImageSpec): class Display(ImageSpec):
increment_count = True increment_count = True


@@ -2,6 +2,14 @@ from django.core.management.base import BaseCommand, CommandError
from optparse import make_option from optparse import make_option
from troggle.core.models import Cave from troggle.core.models import Cave
import settings import settings
import os
from django.db import connection
from django.core import management
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from troggle.core.models import Cave, Entrance
import troggle.flatpages.models
databasename=settings.DATABASES['default']['NAME'] databasename=settings.DATABASES['default']['NAME']
expouser=settings.EXPOUSER expouser=settings.EXPOUSER
@@ -12,22 +20,13 @@ class Command(BaseCommand):
help = 'This is normal usage, clear database and reread everything' help = 'This is normal usage, clear database and reread everything'
option_list = BaseCommand.option_list + ( option_list = BaseCommand.option_list + (
make_option('--foo', make_option('--reset',
action='store_true', action='store_true',
dest='foo', dest='reset',
default=False, default=False,
help='test'), help='Reset the entier DB from files'),
) )
def add_arguments(self, parser):
parser.add_argument(
'--foo',
action='store_true',
dest='foo',
help='Help text',
)
def handle(self, *args, **options): def handle(self, *args, **options):
print(args) print(args)
print(options) print(options)
@@ -46,8 +45,8 @@ class Command(BaseCommand):
self.import_QMs() self.import_QMs()
elif "tunnel" in args: elif "tunnel" in args:
self.import_tunnelfiles() self.import_tunnelfiles()
elif "reset" in args: elif options['reset']:
self.reset() self.reset(self)
elif "survex" in args: elif "survex" in args:
self.import_survex() self.import_survex()
elif "survexpos" in args: elif "survexpos" in args:
@@ -61,13 +60,15 @@ class Command(BaseCommand):
self.dumplogbooks() self.dumplogbooks()
elif "writeCaves" in args: elif "writeCaves" in args:
self.writeCaves() self.writeCaves()
elif "foo" in args: elif options['foo']:
self.stdout.write('Tesing....') self.stdout.write(self.style.WARNING('Tesing....'))
else: else:
self.stdout.write("%s not recognised" % args) #self.stdout.write("%s not recognised" % args)
self.usage(options) #self.usage(options)
self.stdout.write("poo")
#print(args)
def reload_db(): def reload_db(obj):
if settings.DATABASES['default']['ENGINE'] == 'django.db.backends.sqlite3': if settings.DATABASES['default']['ENGINE'] == 'django.db.backends.sqlite3':
try: try:
os.remove(databasename) os.remove(databasename)
@@ -86,22 +87,22 @@ class Command(BaseCommand):
user.is_superuser = True user.is_superuser = True
user.save() user.save()
def make_dirs(): def make_dirs(obj):
"""Make directories that troggle requires""" """Make directories that troggle requires"""
# should also deal with permissions here. # should also deal with permissions here.
if not os.path.isdir(settings.PHOTOS_ROOT): if not os.path.isdir(settings.PHOTOS_ROOT):
os.mkdir(settings.PHOTOS_ROOT) os.mkdir(settings.PHOTOS_ROOT)
def import_caves(): def import_caves(obj):
import parsers.caves import parsers.caves
print("importing caves") print("Importing Caves")
parsers.caves.readcaves() parsers.caves.readcaves()
def import_people(): def import_people(obj):
import parsers.people import parsers.people
parsers.people.LoadPersonsExpos() parsers.people.LoadPersonsExpos()
def import_logbooks(): def import_logbooks(obj):
# The below line was causing errors I didn't understand (it said LOGFILE was a string), and I couldn't be bothered to figure # The below line was causing errors I didn't understand (it said LOGFILE was a string), and I couldn't be bothered to figure
# what was going on so I just catch the error with a try. - AC 21 May # what was going on so I just catch the error with a try. - AC 21 May
try: try:
@@ -112,57 +113,57 @@ class Command(BaseCommand):
import parsers.logbooks import parsers.logbooks
parsers.logbooks.LoadLogbooks() parsers.logbooks.LoadLogbooks()
def import_survex(): def import_survex(obj):
import parsers.survex import parsers.survex
parsers.survex.LoadAllSurvexBlocks() parsers.survex.LoadAllSurvexBlocks()
parsers.survex.LoadPos() parsers.survex.LoadPos()
def import_QMs(): def import_QMs(obj):
import parsers.QMs import parsers.QMs
def import_surveys(): def import_surveys(obj):
import parsers.surveys import parsers.surveys
parsers.surveys.parseSurveys(logfile=settings.LOGFILE) parsers.surveys.parseSurveys(logfile=settings.LOGFILE)
def import_surveyscans(): def import_surveyscans(obj):
import parsers.surveys import parsers.surveys
parsers.surveys.LoadListScans() parsers.surveys.LoadListScans()
def import_tunnelfiles(): def import_tunnelfiles(obj):
import parsers.surveys import parsers.surveys
parsers.surveys.LoadTunnelFiles() parsers.surveys.LoadTunnelFiles()
def reset(): def reset(self, mgmt_obj):
""" Wipe the troggle database and import everything from legacy data """ Wipe the troggle database and import everything from legacy data
""" """
reload_db() self.reload_db()
make_dirs() self.make_dirs()
pageredirects() self.pageredirects()
import_caves() self.import_caves()
import_people() self.import_people()
import_surveyscans() self.import_surveyscans()
import_survex() self.import_survex()
import_logbooks() self.import_logbooks()
import_QMs() self.import_QMs()
try: try:
import_tunnelfiles() self.import_tunnelfiles()
except: except:
print("Tunnel files parser broken.") print("Tunnel files parser broken.")
import_surveys() self.import_surveys()
def pageredirects(): def pageredirects(obj):
for oldURL, newURL in [("indxal.htm", reverse("caveindex"))]: for oldURL, newURL in [("indxal.htm", reverse("caveindex"))]:
f = troggle.flatpages.models.Redirect(originalURL=oldURL, newURL=newURL) f = troggle.flatpages.models.Redirect(originalURL=oldURL, newURL=newURL)
f.save() f.save()
def writeCaves(): def writeCaves(obj):
for cave in Cave.objects.all(): for cave in Cave.objects.all():
cave.writeDataFile() cave.writeDataFile()
for entrance in Entrance.objects.all(): for entrance in Entrance.objects.all():
entrance.writeDataFile() entrance.writeDataFile()
def usage(self, parser): def troggle_usage(obj):
print("""Usage is 'manage.py reset_db <command>' print("""Usage is 'manage.py reset_db <command>'
where command is: where command is:
reset - this is normal usage, clear database and reread everything reset - this is normal usage, clear database and reread everything
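The command above is caught mid-migration: it still declares an optparse option_list (removed in Django 1.10) while also growing an add_arguments() method, and its helpers are being converted from plain functions into methods. For reference, a minimal sketch of the pure Django 1.10 form of such a command, with illustrative option and message text:

    from django.core.management.base import BaseCommand

    class Command(BaseCommand):
        help = "Clear the database and reread everything from the legacy files"

        def add_arguments(self, parser):
            # argparse-based options replace the removed option_list
            parser.add_argument("--reset", action="store_true",
                                help="Reset the entire DB from files")

        def handle(self, *args, **options):
            if options["reset"]:
                self.stdout.write(self.style.WARNING("Resetting database..."))
                # call the individual import steps here, e.g. self.reset()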


@@ -1,24 +0,0 @@
import utm
import math
from django.conf import settings
def lat_lon_entrance(utmstring):
try:
x = float(utmstring.split()[0])
y = float(utmstring.split()[1])
#return ' '+str(x+y)+' '+str(y)
q = utm.to_latlon(x, y, 33, 'U')
return "{:.5f} {:.5f}".format(q[0],q[1])
except:
return 'Not found'
def top_camp_distance(utmstring):
try:
x = float(utmstring.split()[0])
y = float(utmstring.split()[1])
tx = settings.TOPCAMPX
ty = settings.TOPCAMPY
dist = math.sqrt( (tx-x)*(tx-x) + (ty-y)*(ty-y) )
return "{:.1f}".format(dist)
except:
return 'Not found'
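The two template filters deleted above wrapped the third-party utm package to turn an "easting northing" string into latitude/longitude and a distance from top camp. For reference, the conversion they relied on, shown here with made-up placeholder coordinates:

    import utm

    # utm.to_latlon(easting, northing, zone_number, zone_letter) -> (lat, lon)
    lat, lon = utm.to_latlon(410000.0, 5400000.0, 33, "U")
    print("{:.5f} {:.5f}".format(lat, lon))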


@@ -0,0 +1,575 @@
# -*- coding: utf-8 -*-
# Generated by Django 1.10.8 on 2020-02-18 16:01
from __future__ import unicode_literals
from django.conf import settings
import django.core.files.storage
from django.db import migrations, models
import django.db.models.deletion
import troggle.core.models
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Area',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('new_since_parsing', models.BooleanField(default=False, editable=False)),
('non_public', models.BooleanField(default=False)),
('short_name', models.CharField(max_length=100)),
('name', models.CharField(blank=True, max_length=200, null=True)),
('description', models.TextField(blank=True, null=True)),
('parent', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='core.Area')),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='Cave',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('new_since_parsing', models.BooleanField(default=False, editable=False)),
('non_public', models.BooleanField(default=False)),
('official_name', models.CharField(max_length=160)),
('kataster_code', models.CharField(blank=True, max_length=20, null=True)),
('kataster_number', models.CharField(blank=True, max_length=10, null=True)),
('unofficial_number', models.CharField(blank=True, max_length=60, null=True)),
('explorers', models.TextField(blank=True, null=True)),
('underground_description', models.TextField(blank=True, null=True)),
('equipment', models.TextField(blank=True, null=True)),
('references', models.TextField(blank=True, null=True)),
('survey', models.TextField(blank=True, null=True)),
('kataster_status', models.TextField(blank=True, null=True)),
('underground_centre_line', models.TextField(blank=True, null=True)),
('notes', models.TextField(blank=True, null=True)),
('length', models.CharField(blank=True, max_length=100, null=True)),
('depth', models.CharField(blank=True, max_length=100, null=True)),
('extent', models.CharField(blank=True, max_length=100, null=True)),
('survex_file', models.CharField(blank=True, max_length=100, null=True)),
('description_file', models.CharField(blank=True, max_length=200, null=True)),
('url', models.CharField(blank=True, max_length=200, null=True)),
('filename', models.CharField(max_length=200)),
('area', models.ManyToManyField(blank=True, to='core.Area')),
],
options={
'ordering': ('kataster_code', 'unofficial_number'),
},
),
migrations.CreateModel(
name='CaveAndEntrance',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('entrance_letter', models.CharField(blank=True, max_length=20, null=True)),
('cave', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='core.Cave')),
],
),
migrations.CreateModel(
name='CaveDescription',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('new_since_parsing', models.BooleanField(default=False, editable=False)),
('non_public', models.BooleanField(default=False)),
('short_name', models.CharField(max_length=50, unique=True)),
('long_name', models.CharField(blank=True, max_length=200, null=True)),
('description', models.TextField(blank=True, null=True)),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='CaveSlug',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('slug', models.SlugField(unique=True)),
('primary', models.BooleanField(default=False)),
('cave', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='core.Cave')),
],
),
migrations.CreateModel(
name='DataIssue',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('new_since_parsing', models.BooleanField(default=False, editable=False)),
('non_public', models.BooleanField(default=False)),
('date', models.DateTimeField(auto_now_add=True)),
('parser', models.CharField(blank=True, max_length=50, null=True)),
('message', models.CharField(blank=True, max_length=400, null=True)),
],
options={
'ordering': ['date'],
},
),
migrations.CreateModel(
name='DPhoto',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('new_since_parsing', models.BooleanField(default=False, editable=False)),
('caption', models.CharField(blank=True, max_length=1000, null=True)),
('file', models.ImageField(storage=django.core.files.storage.FileSystemStorage(base_url=b'http://127.0.0.1:8000/photos/', location=b'/expo/expoweb/photos'), upload_to=b'.')),
('is_mugshot', models.BooleanField(default=False)),
('lon_utm', models.FloatField(blank=True, null=True)),
('lat_utm', models.FloatField(blank=True, null=True)),
('contains_cave', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='core.Cave')),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='Entrance',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('new_since_parsing', models.BooleanField(default=False, editable=False)),
('non_public', models.BooleanField(default=False)),
('name', models.CharField(blank=True, max_length=100, null=True)),
('entrance_description', models.TextField(blank=True, null=True)),
('explorers', models.TextField(blank=True, null=True)),
('map_description', models.TextField(blank=True, null=True)),
('location_description', models.TextField(blank=True, null=True)),
('approach', models.TextField(blank=True, null=True)),
('underground_description', models.TextField(blank=True, null=True)),
('photo', models.TextField(blank=True, null=True)),
('marking', models.CharField(choices=[(b'P', b'Paint'), (b'P?', b'Paint (?)'), (b'T', b'Tag'), (b'T?', b'Tag (?)'), (b'R', b'Needs Retag'), (b'S', b'Spit'), (b'S?', b'Spit (?)'), (b'U', b'Unmarked'), (b'?', b'Unknown')], max_length=2)),
('marking_comment', models.TextField(blank=True, null=True)),
('findability', models.CharField(blank=True, choices=[(b'?', b'To be confirmed ...'), (b'S', b'Coordinates'), (b'L', b'Lost'), (b'R', b'Refindable')], max_length=1, null=True)),
('findability_description', models.TextField(blank=True, null=True)),
('alt', models.TextField(blank=True, null=True)),
('northing', models.TextField(blank=True, null=True)),
('easting', models.TextField(blank=True, null=True)),
('tag_station', models.TextField(blank=True, null=True)),
('exact_station', models.TextField(blank=True, null=True)),
('other_station', models.TextField(blank=True, null=True)),
('other_description', models.TextField(blank=True, null=True)),
('bearings', models.TextField(blank=True, null=True)),
('url', models.CharField(blank=True, max_length=200, null=True)),
('filename', models.CharField(max_length=200)),
('cached_primary_slug', models.CharField(blank=True, max_length=200, null=True)),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='EntranceSlug',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('slug', models.SlugField(unique=True)),
('primary', models.BooleanField(default=False)),
('entrance', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='core.Entrance')),
],
),
migrations.CreateModel(
name='Expedition',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('new_since_parsing', models.BooleanField(default=False, editable=False)),
('non_public', models.BooleanField(default=False)),
('year', models.CharField(max_length=20, unique=True)),
('name', models.CharField(max_length=100)),
],
options={
'ordering': ('-year',),
'get_latest_by': 'year',
},
),
migrations.CreateModel(
name='ExpeditionDay',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('new_since_parsing', models.BooleanField(default=False, editable=False)),
('non_public', models.BooleanField(default=False)),
('date', models.DateField()),
('expedition', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='core.Expedition')),
],
options={
'ordering': ('date',),
},
),
migrations.CreateModel(
name='LogbookEntry',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('new_since_parsing', models.BooleanField(default=False, editable=False)),
('non_public', models.BooleanField(default=False)),
('date', models.DateTimeField()),
('title', models.CharField(max_length=200)),
('cave_slug', models.SlugField()),
('place', models.CharField(blank=True, help_text=b"Only use this if you haven't chosen a cave", max_length=100, null=True)),
('text', models.TextField()),
('slug', models.SlugField()),
('filename', models.CharField(max_length=200, null=True)),
('entry_type', models.CharField(choices=[(b'wiki', b'Wiki style logbook'), (b'html', b'Html style logbook')], default=b'wiki', max_length=50, null=True)),
('expedition', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='core.Expedition')),
('expeditionday', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='core.ExpeditionDay')),
],
options={
'ordering': ('-date',),
'verbose_name_plural': 'Logbook Entries',
},
),
migrations.CreateModel(
name='NewSubCave',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('new_since_parsing', models.BooleanField(default=False, editable=False)),
('non_public', models.BooleanField(default=False)),
('name', models.CharField(max_length=200, unique=True)),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='OtherCaveName',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('new_since_parsing', models.BooleanField(default=False, editable=False)),
('non_public', models.BooleanField(default=False)),
('name', models.CharField(max_length=160)),
('cave', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='core.Cave')),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='Person',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('new_since_parsing', models.BooleanField(default=False, editable=False)),
('non_public', models.BooleanField(default=False)),
('first_name', models.CharField(max_length=100)),
('last_name', models.CharField(max_length=100)),
('fullname', models.CharField(max_length=200)),
('is_vfho', models.BooleanField(default=False, help_text=b'VFHO is the Vereines f&uuml;r H&ouml;hlenkunde in Obersteier, a nearby Austrian caving club.')),
('mug_shot', models.CharField(blank=True, max_length=100, null=True)),
('blurb', models.TextField(blank=True, null=True)),
('orderref', models.CharField(max_length=200)),
('user', models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
options={
'ordering': ('orderref',),
'verbose_name_plural': 'People',
},
),
migrations.CreateModel(
name='PersonExpedition',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('new_since_parsing', models.BooleanField(default=False, editable=False)),
('non_public', models.BooleanField(default=False)),
('slugfield', models.SlugField(blank=True, null=True)),
('is_guest', models.BooleanField(default=False)),
('expo_committee_position', models.CharField(blank=True, choices=[(b'leader', b'Expo leader'), (b'medical', b'Expo medical officer'), (b'treasurer', b'Expo treasurer'), (b'sponsorship', b'Expo sponsorship coordinator'), (b'research', b'Expo research coordinator')], max_length=200, null=True)),
('nickname', models.CharField(blank=True, max_length=100, null=True)),
('expedition', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='core.Expedition')),
('person', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='core.Person')),
],
options={
'ordering': ('-expedition',),
},
),
migrations.CreateModel(
name='PersonTrip',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('new_since_parsing', models.BooleanField(default=False, editable=False)),
('non_public', models.BooleanField(default=False)),
('time_underground', models.FloatField(help_text=b'In decimal hours')),
('is_logbook_entry_author', models.BooleanField(default=False)),
('logbook_entry', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='core.LogbookEntry')),
('personexpedition', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='core.PersonExpedition')),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='QM',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('new_since_parsing', models.BooleanField(default=False, editable=False)),
('non_public', models.BooleanField(default=False)),
('number', models.IntegerField(help_text=b'this is the sequential number in the year')),
('grade', models.CharField(choices=[(b'A', b'A: Large obvious lead'), (b'B', b'B: Average lead'), (b'C', b'C: Tight unpromising lead'), (b'D', b'D: Dig'), (b'X', b'X: Unclimbable aven')], max_length=1)),
('location_description', models.TextField(blank=True)),
('nearest_station_description', models.CharField(blank=True, max_length=400, null=True)),
('nearest_station_name', models.CharField(blank=True, max_length=200, null=True)),
('area', models.CharField(blank=True, max_length=100, null=True)),
('completion_description', models.TextField(blank=True, null=True)),
('comment', models.TextField(blank=True, null=True)),
('found_by', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='QMs_found', to='core.LogbookEntry')),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='ScannedImage',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('new_since_parsing', models.BooleanField(default=False, editable=False)),
('file', models.ImageField(storage=django.core.files.storage.FileSystemStorage(base_url=b'/survey_scans/', location=b'/expo/expofiles/'), upload_to=troggle.core.models.get_scan_path)),
('scanned_on', models.DateField(null=True)),
('contents', models.CharField(choices=[(b'notes', b'notes'), (b'plan', b'plan_sketch'), (b'elevation', b'elevation_sketch')], max_length=20)),
('number_in_wallet', models.IntegerField(null=True)),
('lon_utm', models.FloatField(blank=True, null=True)),
('lat_utm', models.FloatField(blank=True, null=True)),
('scanned_by', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='core.Person')),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='SurvexBlock',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=100)),
('text', models.TextField()),
('date', models.DateTimeField(blank=True, null=True)),
('begin_char', models.IntegerField()),
('survexpath', models.CharField(max_length=200)),
('totalleglength', models.FloatField()),
('cave', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='core.Cave')),
('expedition', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='core.Expedition')),
('expeditionday', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='core.ExpeditionDay')),
('parent', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='core.SurvexBlock')),
],
options={
'ordering': ('id',),
},
),
migrations.CreateModel(
name='SurvexDirectory',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('path', models.CharField(max_length=200)),
('cave', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='core.Cave')),
],
options={
'ordering': ('path',),
},
),
migrations.CreateModel(
name='SurvexEquate',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('cave', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='core.Cave')),
],
),
migrations.CreateModel(
name='SurvexFile',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('path', models.CharField(max_length=200)),
('cave', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='core.Cave')),
('survexdirectory', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='core.SurvexDirectory')),
],
options={
'ordering': ('id',),
},
),
migrations.CreateModel(
name='SurvexLeg',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('tape', models.FloatField()),
('compass', models.FloatField()),
('clino', models.FloatField()),
('block', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='core.SurvexBlock')),
],
),
migrations.CreateModel(
name='SurvexPersonRole',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('nrole', models.CharField(blank=True, choices=[(b'insts', b'Instruments'), (b'dog', b'Other'), (b'notes', b'Notes'), (b'pics', b'Pictures'), (b'tape', b'Tape measure'), (b'useless', b'Useless'), (b'helper', b'Helper'), (b'disto', b'Disto'), (b'consultant', b'Consultant')], max_length=200, null=True)),
('personname', models.CharField(max_length=100)),
('expeditionday', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='core.ExpeditionDay')),
('person', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='core.Person')),
('personexpedition', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='core.PersonExpedition')),
('persontrip', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='core.PersonTrip')),
('survexblock', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='core.SurvexBlock')),
],
),
migrations.CreateModel(
name='SurvexScansFolder',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('fpath', models.CharField(max_length=200)),
('walletname', models.CharField(max_length=200)),
],
options={
'ordering': ('walletname',),
},
),
migrations.CreateModel(
name='SurvexScanSingle',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('ffile', models.CharField(max_length=200)),
('name', models.CharField(max_length=200)),
('survexscansfolder', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='core.SurvexScansFolder')),
],
options={
'ordering': ('name',),
},
),
migrations.CreateModel(
name='SurvexStation',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=100)),
('x', models.FloatField(blank=True, null=True)),
('y', models.FloatField(blank=True, null=True)),
('z', models.FloatField(blank=True, null=True)),
('block', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='core.SurvexBlock')),
('equate', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='core.SurvexEquate')),
],
),
migrations.CreateModel(
name='SurvexTitle',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=200)),
('cave', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='core.Cave')),
('survexblock', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='core.SurvexBlock')),
],
),
migrations.CreateModel(
name='Survey',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('new_since_parsing', models.BooleanField(default=False, editable=False)),
('non_public', models.BooleanField(default=False)),
('wallet_number', models.IntegerField(blank=True, null=True)),
('wallet_letter', models.CharField(blank=True, max_length=1, null=True)),
('comments', models.TextField(blank=True, null=True)),
('location', models.CharField(blank=True, max_length=400, null=True)),
('centreline_printed_on', models.DateField(blank=True, null=True)),
('tunnel_file', models.FileField(blank=True, null=True, upload_to=b'surveyXMLfiles')),
('integrated_into_main_sketch_on', models.DateField(blank=True, null=True)),
('rendered_image', models.ImageField(blank=True, null=True, upload_to=b'renderedSurveys')),
('centreline_printed_by', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='centreline_printed_by', to='core.Person')),
('expedition', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='core.Expedition')),
('integrated_into_main_sketch_by', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='integrated_into_main_sketch_by', to='core.Person')),
('logbook_entry', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='core.LogbookEntry')),
('subcave', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='core.NewSubCave')),
('survex_block', models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='core.SurvexBlock')),
('tunnel_main_sketch', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='core.Survey')),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='TunnelFile',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('tunnelpath', models.CharField(max_length=200)),
('tunnelname', models.CharField(max_length=200)),
('bfontcolours', models.BooleanField(default=False)),
('filesize', models.IntegerField(default=0)),
('npaths', models.IntegerField(default=0)),
('survexblocks', models.ManyToManyField(to='core.SurvexBlock')),
('survexscans', models.ManyToManyField(to='core.SurvexScanSingle')),
('survexscansfolders', models.ManyToManyField(to='core.SurvexScansFolder')),
('survextitles', models.ManyToManyField(to='core.SurvexTitle')),
('tunnelcontains', models.ManyToManyField(to='core.TunnelFile')),
],
options={
'ordering': ('tunnelpath',),
},
),
migrations.AddField(
model_name='survexleg',
name='stationfrom',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='stationfrom', to='core.SurvexStation'),
),
migrations.AddField(
model_name='survexleg',
name='stationto',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='stationto', to='core.SurvexStation'),
),
migrations.AddField(
model_name='survexdirectory',
name='primarysurvexfile',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='primarysurvexfile', to='core.SurvexFile'),
),
migrations.AddField(
model_name='survexblock',
name='survexfile',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='core.SurvexFile'),
),
migrations.AddField(
model_name='survexblock',
name='survexscansfolder',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='core.SurvexScansFolder'),
),
migrations.AddField(
model_name='scannedimage',
name='survey',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='core.Survey'),
),
migrations.AddField(
model_name='qm',
name='nearest_station',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='core.SurvexStation'),
),
migrations.AddField(
model_name='qm',
name='ticked_off_by',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='QMs_ticked_off', to='core.LogbookEntry'),
),
migrations.AddField(
model_name='dphoto',
name='contains_entrance',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='photo_file', to='core.Entrance'),
),
migrations.AddField(
model_name='dphoto',
name='contains_logbookentry',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='core.LogbookEntry'),
),
migrations.AddField(
model_name='dphoto',
name='contains_person',
field=models.ManyToManyField(blank=True, to='core.Person'),
),
migrations.AddField(
model_name='dphoto',
name='nearest_QM',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='core.QM'),
),
migrations.AddField(
model_name='cavedescription',
name='linked_entrances',
field=models.ManyToManyField(blank=True, to='core.Entrance'),
),
migrations.AddField(
model_name='cavedescription',
name='linked_qms',
field=models.ManyToManyField(blank=True, to='core.QM'),
),
migrations.AddField(
model_name='cavedescription',
name='linked_subcaves',
field=models.ManyToManyField(blank=True, to='core.NewSubCave'),
),
migrations.AddField(
model_name='caveandentrance',
name='entrance',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='core.Entrance'),
),
]
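For reference, a minimal sketch (not part of the migration file above; only the app label 'core' is taken from the operations shown, everything else is standard Django) of applying this initial migration programmatically:

# Minimal sketch: apply the 'core' app's migrations programmatically.
# Assumes DJANGO_SETTINGS_MODULE already points at a valid troggle settings module.
import django
from django.core.management import call_command

django.setup()                    # initialise the app registry
call_command("migrate", "core")   # creates the tables for the CreateModel/AddField operations above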


@@ -10,13 +10,891 @@ from django.db.models import Min, Max
from django.conf import settings
from decimal import Decimal, getcontext
from django.core.urlresolvers import reverse
from imagekit.models import ProcessedImageField #ImageModel
from django.template import Context, loader
import settings
getcontext().prec=2 #use 2 significant figures for decimal calculations
from troggle.core.models_survex import *
from troggle.core.models_old import *
def get_related_by_wikilinks(wiki_text):
found=re.findall(settings.QM_PATTERN,wiki_text)
res=[]
for wikilink in found:
qmdict={'urlroot':settings.URL_ROOT,'cave':wikilink[2],'year':wikilink[1],'number':wikilink[3]}
try:
cave_slugs = CaveSlug.objects.filter(cave__kataster_number = qmdict['cave'])
qm=QM.objects.get(found_by__cave_slug__in = cave_slugs,
found_by__date__year = qmdict['year'],
number = qmdict['number'])
res.append(qm)
except QM.DoesNotExist:
print('fail on '+str(wikilink))
return res
try:
logging.basicConfig(level=logging.DEBUG,
filename=settings.LOGFILE,
filemode='w')
except:
subprocess.call(settings.FIX_PERMISSIONS)
logging.basicConfig(level=logging.DEBUG,
filename=settings.LOGFILE,
filemode='w')
#This class is for adding fields and methods which all of our models will have.
class TroggleModel(models.Model):
new_since_parsing = models.BooleanField(default=False, editable=False)
non_public = models.BooleanField(default=False)
def object_name(self):
return self._meta.object_name
def get_admin_url(self):
return urlparse.urljoin(settings.URL_ROOT, "/admin/core/" + self.object_name().lower() + "/" + str(self.pk))
class Meta:
abstract = True
class TroggleImageModel(models.Model):
new_since_parsing = models.BooleanField(default=False, editable=False)
def object_name(self):
return self._meta.object_name
def get_admin_url(self):
return urlparse.urljoin(settings.URL_ROOT, "/admin/core/" + self.object_name().lower() + "/" + str(self.pk))
class Meta:
abstract = True
#
# single Expedition, usually seen by year
#
class Expedition(TroggleModel):
year = models.CharField(max_length=20, unique=True)
name = models.CharField(max_length=100)
def __unicode__(self):
return self.year
class Meta:
ordering = ('-year',)
get_latest_by = 'year'
def get_absolute_url(self):
return urlparse.urljoin(settings.URL_ROOT, reverse('expedition', args=[self.year]))
# construction function. should be moved out
def get_expedition_day(self, date):
expeditiondays = self.expeditionday_set.filter(date=date)
if expeditiondays:
assert len(expeditiondays) == 1
return expeditiondays[0]
res = ExpeditionDay(expedition=self, date=date)
res.save()
return res
def day_min(self):
res = self.expeditionday_set.all()
return res and res[0] or None
def day_max(self):
res = self.expeditionday_set.all()
return res and res[len(res) - 1] or None
class ExpeditionDay(TroggleModel):
expedition = models.ForeignKey("Expedition")
date = models.DateField()
class Meta:
ordering = ('date',)
def GetPersonTrip(self, personexpedition):
personexpeditions = self.persontrip_set.filter(expeditionday=self)
return personexpeditions and personexpeditions[0] or None
def __unicode__(self):
return str(self.expedition) + ' ' + str(self.date)
#
# single Person, can go on many years
#
class Person(TroggleModel):
first_name = models.CharField(max_length=100)
last_name = models.CharField(max_length=100)
fullname = models.CharField(max_length=200)
is_vfho = models.BooleanField(help_text="VFHO is the Vereines f&uuml;r H&ouml;hlenkunde in Obersteier, a nearby Austrian caving club.", default=False)
mug_shot = models.CharField(max_length=100, blank=True,null=True)
blurb = models.TextField(blank=True,null=True)
#href = models.CharField(max_length=200)
orderref = models.CharField(max_length=200) # for alphabetic
user = models.OneToOneField(User, null=True, blank=True)
def get_absolute_url(self):
return urlparse.urljoin(settings.URL_ROOT,reverse('person',kwargs={'first_name':self.first_name,'last_name':self.last_name}))
class Meta:
verbose_name_plural = "People"
ordering = ('orderref',) # "Wookey" makes too complex for: ('last_name', 'first_name')
def __unicode__(self):
if self.last_name:
return "%s %s" % (self.first_name, self.last_name)
return self.first_name
def notability(self):
notability = Decimal(0)
max_expo_val = 0
max_expo_year = Expedition.objects.all().aggregate(Max('year'))
max_expo_val = int(max_expo_year['year__max']) + 1
for personexpedition in self.personexpedition_set.all():
if not personexpedition.is_guest:
# print(personexpedition.expedition.year)
notability += Decimal(1) / (max_expo_val - int(personexpedition.expedition.year))
return notability
def bisnotable(self):
return self.notability() > Decimal(1)/Decimal(3)
def surveyedleglength(self):
return sum([personexpedition.surveyedleglength() for personexpedition in self.personexpedition_set.all()])
def first(self):
return self.personexpedition_set.order_by('-expedition')[0]
def last(self):
return self.personexpedition_set.order_by('expedition')[0]
#def Sethref(self):
#if self.last_name:
#self.href = self.first_name.lower() + "_" + self.last_name.lower()
#self.orderref = self.last_name + " " + self.first_name
#else:
# self.href = self.first_name.lower()
#self.orderref = self.first_name
#self.notability = 0.0 # set temporarily
#
# Person's attendance at one Expo
#
class PersonExpedition(TroggleModel):
expedition = models.ForeignKey(Expedition)
person = models.ForeignKey(Person)
slugfield = models.SlugField(max_length=50,blank=True,null=True)
is_guest = models.BooleanField(default=False)
COMMITTEE_CHOICES = (
('leader','Expo leader'),
('medical','Expo medical officer'),
('treasurer','Expo treasurer'),
('sponsorship','Expo sponsorship coordinator'),
('research','Expo research coordinator'),
)
expo_committee_position = models.CharField(blank=True,null=True,choices=COMMITTEE_CHOICES,max_length=200)
nickname = models.CharField(max_length=100,blank=True,null=True)
def GetPersonroles(self):
res = [ ]
for personrole in self.personrole_set.order_by('survexblock'):
if res and res[-1]['survexpath'] == personrole.survexblock.survexpath:
res[-1]['roles'] += ", " + str(personrole.role)
else:
res.append({'date':personrole.survexblock.date, 'survexpath':personrole.survexblock.survexpath, 'roles':str(personrole.role)})
return res
class Meta:
ordering = ('-expedition',)
#order_with_respect_to = 'expedition'
def __unicode__(self):
return "%s: (%s)" % (self.person, self.expedition)
#why is the below a function in personexpedition, rather than in person? - AC 14 Feb 09
def name(self):
if self.nickname:
return "%s (%s) %s" % (self.person.first_name, self.nickname, self.person.last_name)
if self.person.last_name:
return "%s %s" % (self.person.first_name, self.person.last_name)
return self.person.first_name
def get_absolute_url(self):
return urlparse.urljoin(settings.URL_ROOT, reverse('personexpedition',kwargs={'first_name':self.person.first_name,'last_name':self.person.last_name,'year':self.expedition.year}))
def surveyedleglength(self):
survexblocks = [personrole.survexblock for personrole in self.personrole_set.all() ]
return sum([survexblock.totalleglength for survexblock in set(survexblocks)])
# would prefer to return actual person trips so we could link to first and last ones
def day_min(self):
res = self.persontrip_set.aggregate(day_min=Min("expeditionday__date"))
return res["day_min"]
def day_max(self):
res = self.persontrip_set.all().aggregate(day_max=Max("expeditionday__date"))
return res["day_max"]
#
# Single parsed entry from Logbook
#
class LogbookEntry(TroggleModel):
LOGBOOK_ENTRY_TYPES = (
("wiki", "Wiki style logbook"),
("html", "Html style logbook")
)
date = models.DateTimeField()#MJG wants to turn this into a datetime such that multiple Logbook entries on the same day can be ordered.
expeditionday = models.ForeignKey("ExpeditionDay", null=True)#MJG wants to KILL THIS (redundant information)
expedition = models.ForeignKey(Expedition,blank=True,null=True) # yes this is double-
title = models.CharField(max_length=settings.MAX_LOGBOOK_ENTRY_TITLE_LENGTH)
cave_slug = models.SlugField(max_length=50)
place = models.CharField(max_length=100,blank=True,null=True,help_text="Only use this if you haven't chosen a cave")
text = models.TextField()
slug = models.SlugField(max_length=50)
filename = models.CharField(max_length=200,null=True)
entry_type = models.CharField(default="wiki",null=True,choices=LOGBOOK_ENTRY_TYPES,max_length=50)
class Meta:
verbose_name_plural = "Logbook Entries"
# several PersonTrips point in to this object
ordering = ('-date',)
def __getattribute__(self, item):
if item == "cave": #Allow a logbookentries cave to be directly accessed despite not having a proper foreignkey
return CaveSlug.objects.get(slug = self.cave_slug).cave
return super(LogbookEntry, self).__getattribute__(item)
def __init__(self, *args, **kwargs):
if "cave" in kwargs.keys():
if kwargs["cave"] is not None:
kwargs["cave_slug"] = CaveSlug.objects.get(cave=kwargs["cave"], primary=True).slug
kwargs.pop("cave")
return super(LogbookEntry, self).__init__(*args, **kwargs)
def isLogbookEntry(self): # Function used in templates
return True
def get_absolute_url(self):
return urlparse.urljoin(settings.URL_ROOT, reverse('logbookentry',kwargs={'date':self.date,'slug':self.slug}))
def __unicode__(self):
return "%s: (%s)" % (self.date, self.title)
def get_next_by_id(self):
LogbookEntry.objects.get(id=self.id+1)
def get_previous_by_id(self):
LogbookEntry.objects.get(id=self.id-1)
def new_QM_number(self):
"""Returns """
if self.cave:
nextQMnumber=self.cave.new_QM_number(self.date.year)
else:
return None
return nextQMnumber
def new_QM_found_link(self):
"""Produces a link to a new QM with the next number filled in and this LogbookEntry set as 'found by' """
return settings.URL_ROOT + r'/admin/core/qm/add/?' + r'found_by=' + str(self.pk) +'&number=' + str(self.new_QM_number())
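# Illustrative note (not in the original source): for a LogbookEntry with pk 42 whose
# next QM number works out as 7, new_QM_found_link() returns
# settings.URL_ROOT + "/admin/core/qm/add/?found_by=42&number=7".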
def DayIndex(self):
return list(self.expeditionday.logbookentry_set.all()).index(self)
#
# Single Person going on a trip, which may or may not be written up (accounts for different T/U for people in same logbook entry)
#
class PersonTrip(TroggleModel):
personexpedition = models.ForeignKey("PersonExpedition",null=True)
#expeditionday = models.ForeignKey("ExpeditionDay")#MJG wants to KILL THIS (redundant information)
#date = models.DateField() #MJG wants to KILL THIS (redundant information)
time_underground = models.FloatField(help_text="In decimal hours")
logbook_entry = models.ForeignKey(LogbookEntry)
is_logbook_entry_author = models.BooleanField(default=False)
# sequencing by person (difficult to solve locally)
#persontrip_next = models.ForeignKey('PersonTrip', related_name='pnext', blank=True,null=True)#MJG wants to KILL THIS (and use function persontrip_next_auto)
#persontrip_prev = models.ForeignKey('PersonTrip', related_name='pprev', blank=True,null=True)#MJG wants to KILL THIS (and use function persontrip_prev_auto)
def persontrip_next(self):
futurePTs = PersonTrip.objects.filter(personexpedition = self.personexpedition, logbook_entry__date__gt = self.logbook_entry.date).order_by('logbook_entry__date').all()
if len(futurePTs) > 0:
return futurePTs[0]
else:
return None
def persontrip_prev(self):
pastPTs = PersonTrip.objects.filter(personexpedition = self.personexpedition, logbook_entry__date__lt = self.logbook_entry.date).order_by('-logbook_entry__date').all()
if len(pastPTs) > 0:
return pastPTs[0]
else:
return None
def place(self):
return self.logbook_entry.cave and self.logbook_entry.cave or self.logbook_entry.place
def __unicode__(self):
return "%s (%s)" % (self.personexpedition, self.logbook_entry.date)
##########################################
# move following classes into models_cave
##########################################
class Area(TroggleModel):
short_name = models.CharField(max_length=100)
name = models.CharField(max_length=200, blank=True, null=True)
description = models.TextField(blank=True,null=True)
parent = models.ForeignKey('Area', blank=True, null=True)
def __unicode__(self):
if self.parent:
return unicode(self.parent) + u" - " + unicode(self.short_name)
else:
return unicode(self.short_name)
def kat_area(self):
if self.short_name in ["1623", "1626"]:
return self.short_name
elif self.parent:
return self.parent.kat_area()
class CaveAndEntrance(models.Model):
cave = models.ForeignKey('Cave')
entrance = models.ForeignKey('Entrance')
entrance_letter = models.CharField(max_length=20,blank=True,null=True)
def __unicode__(self):
return unicode(self.cave) + unicode(self.entrance_letter)
class CaveSlug(models.Model):
cave = models.ForeignKey('Cave')
slug = models.SlugField(max_length=50, unique = True)
primary = models.BooleanField(default=False)
def __unicode__(self):
return self.slug
class Cave(TroggleModel):
# too much here perhaps,
official_name = models.CharField(max_length=160)
area = models.ManyToManyField(Area, blank=True)
kataster_code = models.CharField(max_length=20,blank=True,null=True)
kataster_number = models.CharField(max_length=10,blank=True, null=True)
unofficial_number = models.CharField(max_length=60,blank=True, null=True)
entrances = models.ManyToManyField('Entrance', through='CaveAndEntrance')
explorers = models.TextField(blank=True,null=True)
underground_description = models.TextField(blank=True,null=True)
equipment = models.TextField(blank=True,null=True)
references = models.TextField(blank=True,null=True)
survey = models.TextField(blank=True,null=True)
kataster_status = models.TextField(blank=True,null=True)
underground_centre_line = models.TextField(blank=True,null=True)
notes = models.TextField(blank=True,null=True)
length = models.CharField(max_length=100,blank=True,null=True)
depth = models.CharField(max_length=100,blank=True,null=True)
extent = models.CharField(max_length=100,blank=True,null=True)
survex_file = models.CharField(max_length=100,blank=True,null=True)
description_file = models.CharField(max_length=200,blank=True,null=True)
url = models.CharField(max_length=200,blank=True,null=True)
filename = models.CharField(max_length=200)
#class Meta:
# unique_together = (("area", "kataster_number"), ("area", "unofficial_number"))
# FIXME Kataster Areas and CUCC defined sub areas need separating
#href = models.CharField(max_length=100)
class Meta:
ordering = ('kataster_code', 'unofficial_number')
def hassurvey(self):
if not self.underground_centre_line:
return "No"
if (self.survey.find("<img") > -1 or self.survey.find("<a") > -1 or self.survey.find("<IMG") > -1 or self.survey.find("<A") > -1):
return "Yes"
return "Missing"
def hassurveydata(self):
if not self.underground_centre_line:
return "No"
if self.survex_file:
return "Yes"
return "Missing"
def slug(self):
primarySlugs = self.caveslug_set.filter(primary = True)
if primarySlugs:
return primarySlugs[0].slug
else:
slugs = self.caveslug_set.filter()
if slugs:
return slugs[0].slug
def ours(self):
return bool(re.search(r'CUCC', self.explorers))
def reference(self):
if self.kataster_number:
return "%s-%s" % (self.kat_area(), self.kataster_number)
else:
return "%s-%s" % (self.kat_area(), self.unofficial_number)
def get_absolute_url(self):
if self.kataster_number:
href = self.kataster_number
elif self.unofficial_number:
href = self.unofficial_number
else:
href = self.official_name.lower()
#return settings.URL_ROOT + '/cave/' + href + '/'
return urlparse.urljoin(settings.URL_ROOT, reverse('cave',kwargs={'cave_id':href,}))
def __unicode__(self, sep = u": "):
return unicode(self.slug())
def get_QMs(self):
return QM.objects.filter(nearest_station__block__cave__caveslug=self.caveslug_set.all())
def new_QM_number(self, year=datetime.date.today().year):
"""Given a cave and the current year, returns the next QM number."""
try:
res=QM.objects.filter(found_by__date__year=year, found_by__cave=self).order_by('-number')[0]
except IndexError:
return 1
return res.number+1
def kat_area(self):
for a in self.area.all():
if a.kat_area():
return a.kat_area()
def entrances(self):
return CaveAndEntrance.objects.filter(cave=self)
def singleentrance(self):
return len(CaveAndEntrance.objects.filter(cave=self)) == 1
def entrancelist(self):
rs = []
res = ""
for e in CaveAndEntrance.objects.filter(cave=self):
rs.append(e.entrance_letter)
rs.sort()
prevR = None
n = 0
for r in rs:
if prevR:
if chr(ord(prevR) + 1 ) == r:
prevR = r
n += 1
else:
if n == 0:
res += ", " + prevR
else:
res += "&ndash;" + prevR
else:
prevR = r
n = 0
res += r
if n == 0:
res += ", " + prevR
else:
res += "&ndash;" + prevR
return res
def writeDataFile(self):
try:
f = open(os.path.join(settings.CAVEDESCRIPTIONS, self.filename), "w")
except:
subprocess.call(settings.FIX_PERMISSIONS)
f = open(os.path.join(settings.CAVEDESCRIPTIONS, self.filename), "w")
t = loader.get_template('dataformat/cave.xml')
c = Context({'cave': self})
u = t.render(c)
u8 = u.encode("utf-8")
f.write(u8)
f.close()
def getArea(self):
areas = self.area.all()
lowestareas = list(areas)
for area in areas:
if area.parent in areas:
try:
lowestareas.remove(area.parent)
except:
pass
return lowestareas[0]
def getCaveByReference(reference):
areaname, code = reference.split("-", 1)
#print(areaname, code)
area = Area.objects.get(short_name = areaname)
#print(area)
foundCaves = list(Cave.objects.filter(area = area, kataster_number = code).all()) + list(Cave.objects.filter(area = area, unofficial_number = code).all())
print(list(foundCaves))
if len(foundCaves) == 1:
return foundCaves[0]
else:
return False
class OtherCaveName(TroggleModel):
name = models.CharField(max_length=160)
cave = models.ForeignKey(Cave)
def __unicode__(self):
return unicode(self.name)
class EntranceSlug(models.Model):
entrance = models.ForeignKey('Entrance')
slug = models.SlugField(max_length=50, unique = True)
primary = models.BooleanField(default=False)
class Entrance(TroggleModel):
name = models.CharField(max_length=100, blank=True,null=True)
entrance_description = models.TextField(blank=True,null=True)
explorers = models.TextField(blank=True,null=True)
map_description = models.TextField(blank=True,null=True)
location_description = models.TextField(blank=True,null=True)
approach = models.TextField(blank=True,null=True)
underground_description = models.TextField(blank=True,null=True)
photo = models.TextField(blank=True,null=True)
MARKING_CHOICES = (
('P', 'Paint'),
('P?', 'Paint (?)'),
('T', 'Tag'),
('T?', 'Tag (?)'),
('R', 'Needs Retag'),
('S', 'Spit'),
('S?', 'Spit (?)'),
('U', 'Unmarked'),
('?', 'Unknown'))
marking = models.CharField(max_length=2, choices=MARKING_CHOICES)
marking_comment = models.TextField(blank=True,null=True)
FINDABLE_CHOICES = (
('?', 'To be confirmed ...'),
('S', 'Coordinates'),
('L', 'Lost'),
('R', 'Refindable'))
findability = models.CharField(max_length=1, choices=FINDABLE_CHOICES, blank=True, null=True)
findability_description = models.TextField(blank=True,null=True)
alt = models.TextField(blank=True, null=True)
northing = models.TextField(blank=True, null=True)
easting = models.TextField(blank=True, null=True)
tag_station = models.TextField(blank=True, null=True)
exact_station = models.TextField(blank=True, null=True)
other_station = models.TextField(blank=True, null=True)
other_description = models.TextField(blank=True,null=True)
bearings = models.TextField(blank=True,null=True)
url = models.CharField(max_length=200,blank=True,null=True)
filename = models.CharField(max_length=200)
cached_primary_slug = models.CharField(max_length=200,blank=True,null=True)
def __unicode__(self):
return unicode(self.slug())
def exact_location(self):
return SurvexStation.objects.lookup(self.exact_station)
def other_location(self):
return SurvexStation.objects.lookup(self.other_station)
def find_location(self):
r = {'': 'To be entered ',
'?': 'To be confirmed:',
'S': '',
'L': 'Lost:',
'R': 'Refindable:'}[self.findability]
if self.tag_station:
try:
s = SurvexStation.objects.filter(name=self.tag_station)[:1]
s = s[0]
return r + "%0.0fE %0.0fN %0.0fAlt" % (s.x, s.y, s.z)
except:
return r + "%s Tag Station not in dataset" % self.tag_station
if self.exact_station:
try:
s = SurvexStation.objects.filter(name=self.exact_station)[:1]
s = s[0]
return r + "%0.0fE %0.0fN %0.0fAlt" % (s.x, s.y, s.z)
except:
return r + "%s Exact Station not in dataset" % self.tag_station
if self.other_station:
try:
s = SurvexStation.objects.filter(name=self.other_station)[:1]
s = s[0]
return r + "%0.0fE %0.0fN %0.0fAlt %s" % (s.x, s.y, s.z, self.other_description)
except:
return r + "%s Other Station not in dataset" % self.tag_station
if self.findability == "S":
r += "ERROR, Entrance has been surveyed but has no survex point"
if self.bearings:
return r + self.bearings
return r
def best_station(self):
if self.tag_station:
return self.tag_station
if self.exact_station:
return self.exact_station
if self.other_station:
return self.other_station
def has_photo(self):
if self.photo:
if (self.photo.find("<img") > -1 or self.photo.find("<a") > -1 or self.photo.find("<IMG") > -1 or self.photo.find("<A") > -1):
return "Yes"
else:
return "Missing"
else:
return "No"
def marking_val(self):
for m in self.MARKING_CHOICES:
if m[0] == self.marking:
return m[1]
def findability_val(self):
for f in self.FINDABLE_CHOICES:
if f[0] == self.findability:
return f[1]
def tag(self):
return SurvexStation.objects.lookup(self.tag_station)
def needs_surface_work(self):
return self.findability != "S" or not self.has_photo or self.marking != "T"
def get_absolute_url(self):
ancestor_titles='/'.join([subcave.title for subcave in self.get_ancestors()])
if ancestor_titles:
res = '/'.join((self.get_root().cave.get_absolute_url(), ancestor_titles, self.title))
else:
res = '/'.join((self.get_root().cave.get_absolute_url(), self.title))
return res
def slug(self):
if not self.cached_primary_slug:
primarySlugs = self.entranceslug_set.filter(primary = True)
if primarySlugs:
self.cached_primary_slug = primarySlugs[0].slug
self.save()
else:
slugs = self.entranceslug_set.filter()
if slugs:
self.cached_primary_slug = slugs[0].slug
self.save()
return self.cached_primary_slug
def writeDataFile(self):
try:
f = open(os.path.join(settings.ENTRANCEDESCRIPTIONS, self.filename), "w")
except:
subprocess.call(settings.FIX_PERMISSIONS)
f = open(os.path.join(settings.ENTRANCEDESCRIPTIONS, self.filename), "w")
t = loader.get_template('dataformat/entrance.xml')
c = Context({'entrance': self})
u = t.render(c)
u8 = u.encode("utf-8")
f.write(u8)
f.close()
class CaveDescription(TroggleModel):
short_name = models.CharField(max_length=50, unique = True)
long_name = models.CharField(max_length=200, blank=True, null=True)
description = models.TextField(blank=True,null=True)
linked_subcaves = models.ManyToManyField("NewSubCave", blank=True)
linked_entrances = models.ManyToManyField("Entrance", blank=True)
linked_qms = models.ManyToManyField("QM", blank=True)
def __unicode__(self):
if self.long_name:
return unicode(self.long_name)
else:
return unicode(self.short_name)
def get_absolute_url(self):
return urlparse.urljoin(settings.URL_ROOT, reverse('cavedescription', args=(self.short_name,)))
def save(self):
"""
Overridden save method which stores wikilinks in text as links in database.
"""
super(CaveDescription, self).save()
qm_list=get_related_by_wikilinks(self.description)
for qm in qm_list:
self.linked_qms.add(qm)
super(CaveDescription, self).save()
class NewSubCave(TroggleModel):
name = models.CharField(max_length=200, unique = True)
def __unicode__(self):
return unicode(self.name)
class QM(TroggleModel):
# based on qm.csv in trunk/expoweb/1623/204 which has the fields:
# "Number","Grade","Area","Description","Page reference","Nearest station","Completion description","Comment"
found_by = models.ForeignKey(LogbookEntry, related_name='QMs_found',blank=True, null=True )
ticked_off_by = models.ForeignKey(LogbookEntry, related_name='QMs_ticked_off',null=True,blank=True)
# cave = models.ForeignKey(Cave)
# expedition = models.ForeignKey(Expedition)
number = models.IntegerField(help_text="this is the sequential number in the year", )
GRADE_CHOICES=(
('A', 'A: Large obvious lead'),
('B', 'B: Average lead'),
('C', 'C: Tight unpromising lead'),
('D', 'D: Dig'),
('X', 'X: Unclimbable aven')
)
grade = models.CharField(max_length=1, choices=GRADE_CHOICES)
location_description = models.TextField(blank=True)
nearest_station_description = models.CharField(max_length=400,null=True,blank=True)
nearest_station_name = models.CharField(max_length=200,blank=True,null=True)
nearest_station = models.ForeignKey(SurvexStation,null=True,blank=True)
area = models.CharField(max_length=100,blank=True,null=True)
completion_description = models.TextField(blank=True,null=True)
comment=models.TextField(blank=True,null=True)
def __unicode__(self):
return u"%s %s" % (self.code(), self.grade)
def code(self):
if self.found_by:
# Old style QMs where found_by is a logbook entry
return u"%s-%s-%s" % (unicode(self.found_by.cave)[6:], self.found_by.date.year, self.number)
elif self.nearest_station:
# New style QMs where QMs are stored in SVX files and nearest station is a foreign key
return u"%s-%s-%s" % (self.nearest_station.block.name, self.nearest_station.name, self.number)
else:
# Just give up!!
return u"%s" % (self.number)
def get_absolute_url(self):
#return settings.URL_ROOT + '/cave/' + self.found_by.cave.kataster_number + '/' + str(self.found_by.date.year) + '-' + '%02d' %self.number
return urlparse.urljoin(settings.URL_ROOT, reverse('qm',kwargs={'qm_id':self.id}))
def get_next_by_id(self):
return QM.objects.get(id=self.id+1)
def get_previous_by_id(self):
return QM.objects.get(id=self.id-1)
def wiki_link(self):
return u"%s%s%s" % ('[[QM:',self.code(),']]')
photoFileStorage = FileSystemStorage(location=settings.PHOTOS_ROOT, base_url=settings.PHOTOS_URL)
class DPhoto(TroggleImageModel):
caption = models.CharField(max_length=1000,blank=True,null=True)
contains_logbookentry = models.ForeignKey(LogbookEntry,blank=True,null=True)
contains_person = models.ManyToManyField(Person,blank=True)
file = models.ImageField(storage=photoFileStorage, upload_to='.',)
is_mugshot = models.BooleanField(default=False)
contains_cave = models.ForeignKey(Cave,blank=True,null=True)
contains_entrance = models.ForeignKey(Entrance, related_name="photo_file",blank=True,null=True)
#nearest_survey_point = models.ForeignKey(SurveyStation,blank=True,null=True)
nearest_QM = models.ForeignKey(QM,blank=True,null=True)
lon_utm = models.FloatField(blank=True,null=True)
lat_utm = models.FloatField(blank=True,null=True)
class IKOptions:
spec_module = 'core.imagekit_specs'
cache_dir = 'thumbs'
image_field = 'file'
#content_type = models.ForeignKey(ContentType)
#object_id = models.PositiveIntegerField()
#location = generic.GenericForeignKey('content_type', 'object_id')
def __unicode__(self):
return self.caption
scansFileStorage = FileSystemStorage(location=settings.SURVEY_SCANS, base_url=settings.SURVEYS_URL)
def get_scan_path(instance, filename):
year=instance.survey.expedition.year
#print("WN: ", type(instance.survey.wallet_number), instance.survey.wallet_number, instance.survey.wallet_letter)
number=str(instance.survey.wallet_number)
if str(instance.survey.wallet_letter) != "None":
number=str(instance.survey.wallet_letter) + number #two strings formatting because convention is 2009#01 or 2009#X01
return os.path.join('./',year,year+r'#'+number,str(instance.contents)+str(instance.number_in_wallet)+r'.jpg')
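# Illustrative example (not in the original source): a scan in a 2009 survey with
# wallet_letter "X", wallet_number 1, contents "notes" and number_in_wallet 3 maps to
# ./2009/2009#X1/notes3.jpg (note the number is not zero-padded here, although the
# convention quoted above is 2009#01 / 2009#X01).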
class ScannedImage(TroggleImageModel):
file = models.ImageField(storage=scansFileStorage, upload_to=get_scan_path)
scanned_by = models.ForeignKey(Person,blank=True, null=True)
scanned_on = models.DateField(null=True)
survey = models.ForeignKey('Survey')
contents = models.CharField(max_length=20,choices=(('notes','notes'),('plan','plan_sketch'),('elevation','elevation_sketch')))
number_in_wallet = models.IntegerField(null=True)
lon_utm = models.FloatField(blank=True,null=True)
lat_utm = models.FloatField(blank=True,null=True)
class IKOptions:
spec_module = 'core.imagekit_specs'
cache_dir = 'thumbs'
image_field = 'file'
#content_type = models.ForeignKey(ContentType)
#object_id = models.PositiveIntegerField()
#location = generic.GenericForeignKey('content_type', 'object_id')
#This is an ugly hack to deal with the #s in our survey scan paths. The correct thing is to write a custom file storage backend which calls urlencode on the name for making file.url but not file.path.
def correctURL(self):
return string.replace(self.file.url,r'#',r'%23')
def __unicode__(self):
return get_scan_path(self,'')
class Survey(TroggleModel):
expedition = models.ForeignKey('Expedition') #REDUNDANT (logbook_entry)
wallet_number = models.IntegerField(blank=True,null=True)
wallet_letter = models.CharField(max_length=1,blank=True,null=True)
comments = models.TextField(blank=True,null=True)
location = models.CharField(max_length=400,blank=True,null=True) #REDUNDANT
subcave = models.ForeignKey('NewSubCave', blank=True, null=True)
#notes_scan = models.ForeignKey('ScannedImage',related_name='notes_scan',blank=True, null=True) #Replaced by contents field of ScannedImage model
survex_block = models.OneToOneField('SurvexBlock',blank=True, null=True)
logbook_entry = models.ForeignKey('LogbookEntry')
centreline_printed_on = models.DateField(blank=True, null=True)
centreline_printed_by = models.ForeignKey('Person',related_name='centreline_printed_by',blank=True,null=True)
#sketch_scan = models.ForeignKey(ScannedImage,blank=True, null=True) #Replaced by contents field of ScannedImage model
tunnel_file = models.FileField(upload_to='surveyXMLfiles',blank=True, null=True)
tunnel_main_sketch = models.ForeignKey('Survey',blank=True,null=True)
integrated_into_main_sketch_on = models.DateField(blank=True,null=True)
integrated_into_main_sketch_by = models.ForeignKey('Person' ,related_name='integrated_into_main_sketch_by', blank=True,null=True)
rendered_image = models.ImageField(upload_to='renderedSurveys',blank=True,null=True)
def __unicode__(self):
return self.expedition.year+"#" + "%s%02d" % (self.wallet_letter, int(self.wallet_number))
def notes(self):
return self.scannedimage_set.filter(contents='notes')
def plans(self):
return self.scannedimage_set.filter(contents='plan')
def elevations(self):
return self.scannedimage_set.filter(contents='elevation')
class DataIssue(TroggleModel):
date = models.DateTimeField(auto_now_add=True, blank=True)
parser = models.CharField(max_length=50, blank=True, null=True)
message = models.CharField(max_length=400, blank=True, null=True)
class Meta:
ordering = ['date']
def __unicode__(self):
return u"%s - %s" % (self.parser, self.message)


@@ -1,864 +0,0 @@
import urllib, urlparse, string, os, datetime, logging, re
import subprocess
from django.forms import ModelForm
from django.db import models
from django.contrib import admin
from django.core.files.storage import FileSystemStorage
from django.contrib.auth.models import User
from django.contrib.contenttypes.models import ContentType
from django.db.models import Min, Max
from django.conf import settings
from decimal import Decimal, getcontext
from django.core.urlresolvers import reverse
from imagekit.models import ImageModel
from django.template import Context, loader
import settings
getcontext().prec=2 #use 2 significant figures for decimal calculations
from troggle.core.models_survex import *
from troggle.core.models_millenial import *
def get_related_by_wikilinks(wiki_text):
found=re.findall(settings.QM_PATTERN,wiki_text)
res=[]
for wikilink in found:
qmdict={'urlroot':settings.URL_ROOT,'cave':wikilink[2],'year':wikilink[1],'number':wikilink[3]}
try:
cave_slugs = CaveSlug.objects.filter(cave__kataster_number = qmdict['cave'])
qm=QM.objects.get(found_by__cave_slug__in = cave_slugs,
found_by__date__year = qmdict['year'],
number = qmdict['number'])
res.append(qm)
except QM.DoesNotExist:
print('fail on '+str(wikilink))
return res
try:
logging.basicConfig(level=logging.DEBUG,
filename=settings.LOGFILE,
filemode='w')
except:
subprocess.call(settings.FIX_PERMISSIONS)
logging.basicConfig(level=logging.DEBUG,
filename=settings.LOGFILE,
filemode='w')
#This class is for adding fields and methods which all of our models will have.
class TroggleModel(models.Model):
new_since_parsing = models.BooleanField(default=False, editable=False)
non_public = models.BooleanField(default=False)
def object_name(self):
return self._meta.object_name
def get_admin_url(self):
return urlparse.urljoin(settings.URL_ROOT, "/admin/core/" + self.object_name().lower() + "/" + str(self.pk))
class Meta:
abstract = True
class TroggleImageModel(ImageModel):
new_since_parsing = models.BooleanField(default=False, editable=False)
def object_name(self):
return self._meta.object_name
def get_admin_url(self):
return urlparse.urljoin(settings.URL_ROOT, "/admin/core/" + self.object_name().lower() + "/" + str(self.pk))
class Meta:
abstract = True
#
# single Expedition, usually seen by year
#
class Expedition(TroggleModel):
year = models.CharField(max_length=20, unique=True)
name = models.CharField(max_length=100)
def __unicode__(self):
return self.year
class Meta:
ordering = ('-year',)
get_latest_by = 'year'
def get_absolute_url(self):
return urlparse.urljoin(settings.URL_ROOT, reverse('expedition', args=[self.year]))
# construction function. should be moved out
def get_expedition_day(self, date):
expeditiondays = self.expeditionday_set.filter(date=date)
if expeditiondays:
assert len(expeditiondays) == 1
return expeditiondays[0]
res = ExpeditionDay(expedition=self, date=date)
res.save()
return res
def day_min(self):
res = self.expeditionday_set.all()
return res and res[0] or None
def day_max(self):
res = self.expeditionday_set.all()
return res and res[len(res) - 1] or None
class ExpeditionDay(TroggleModel):
expedition = models.ForeignKey("Expedition")
date = models.DateField()
class Meta:
ordering = ('date',)
def GetPersonTrip(self, personexpedition):
personexpeditions = self.persontrip_set.filter(expeditionday=self)
return personexpeditions and personexpeditions[0] or None
#
# single Person, can go on many years
#
class Person(TroggleModel):
first_name = models.CharField(max_length=100)
last_name = models.CharField(max_length=100)
is_vfho = models.BooleanField(help_text="VFHO is the Vereines f&uuml;r H&ouml;hlenkunde in Obersteier, a nearby Austrian caving club.", default=False)
mug_shot = models.CharField(max_length=100, blank=True,null=True)
blurb = models.TextField(blank=True,null=True)
#href = models.CharField(max_length=200)
orderref = models.CharField(max_length=200) # for alphabetic
#the below have been removed and made methods. I'm not sure what the b in bisnotable stands for. - AC 16 Feb
#notability = models.FloatField() # for listing the top 20 people
#bisnotable = models.BooleanField(default=False)
user = models.OneToOneField(User, null=True, blank=True)
def get_absolute_url(self):
return urlparse.urljoin(settings.URL_ROOT,reverse('person',kwargs={'first_name':self.first_name,'last_name':self.last_name}))
class Meta:
verbose_name_plural = "People"
ordering = ('orderref',) # "Wookey" makes too complex for: ('last_name', 'first_name')
def __unicode__(self):
if self.last_name:
return "%s %s" % (self.first_name, self.last_name)
return self.first_name
def notability(self):
notability = Decimal(0)
for personexpedition in self.personexpedition_set.all():
if not personexpedition.is_guest:
notability += Decimal(1) / (2012 - int(personexpedition.expedition.year))
return notability
def bisnotable(self):
return self.notability() > Decimal(1)/Decimal(3)
def surveyedleglength(self):
return sum([personexpedition.surveyedleglength() for personexpedition in self.personexpedition_set.all()])
def first(self):
return self.personexpedition_set.order_by('-expedition')[0]
def last(self):
return self.personexpedition_set.order_by('expedition')[0]
#def Sethref(self):
#if self.last_name:
#self.href = self.first_name.lower() + "_" + self.last_name.lower()
#self.orderref = self.last_name + " " + self.first_name
#else:
# self.href = self.first_name.lower()
#self.orderref = self.first_name
#self.notability = 0.0 # set temporarily
#
# Person's attendance at one Expo
#
class PersonExpedition(TroggleModel):
expedition = models.ForeignKey(Expedition)
person = models.ForeignKey(Person)
slugfield = models.SlugField(max_length=50,blank=True,null=True)
is_guest = models.BooleanField(default=False)
COMMITTEE_CHOICES = (
('leader','Expo leader'),
('medical','Expo medical officer'),
('treasurer','Expo treasurer'),
('sponsorship','Expo sponsorship coordinator'),
('research','Expo research coordinator'),
)
expo_committee_position = models.CharField(blank=True,null=True,choices=COMMITTEE_CHOICES,max_length=200)
nickname = models.CharField(max_length=100,blank=True,null=True)
def GetPersonroles(self):
res = [ ]
for personrole in self.personrole_set.order_by('survexblock'):
if res and res[-1]['survexpath'] == personrole.survexblock.survexpath:
res[-1]['roles'] += ", " + str(personrole.role)
else:
res.append({'date':personrole.survexblock.date, 'survexpath':personrole.survexblock.survexpath, 'roles':str(personrole.role)})
return res
class Meta:
ordering = ('-expedition',)
#order_with_respect_to = 'expedition'
def __unicode__(self):
return "%s: (%s)" % (self.person, self.expedition)
#why is the below a function in personexpedition, rather than in person? - AC 14 Feb 09
def name(self):
if self.nickname:
return "%s (%s) %s" % (self.person.first_name, self.nickname, self.person.last_name)
if self.person.last_name:
return "%s %s" % (self.person.first_name, self.person.last_name)
return self.person.first_name
def get_absolute_url(self):
return urlparse.urljoin(settings.URL_ROOT, reverse('personexpedition',kwargs={'first_name':self.person.first_name,'last_name':self.person.last_name,'year':self.expedition.year}))
def surveyedleglength(self):
survexblocks = [personrole.survexblock for personrole in self.personrole_set.all() ]
return sum([survexblock.totalleglength for survexblock in set(survexblocks)])
# would prefer to return actual person trips so we could link to first and last ones
def day_min(self):
res = self.persontrip_set.aggregate(day_min=Min("expeditionday__date"))
return res["day_min"]
def day_max(self):
res = self.persontrip_set.all().aggregate(day_max=Max("expeditionday__date"))
return res["day_max"]
#
# Single parsed entry from Logbook
#
class LogbookEntry(TroggleModel):
date = models.DateField()#MJG wants to turn this into a datetime such that multiple Logbook entries on the same day can be ordered.
expeditionday = models.ForeignKey("ExpeditionDay", null=True)#MJG wants to KILL THIS (redundant information)
expedition = models.ForeignKey(Expedition,blank=True,null=True) # yes this is double-
#author = models.ForeignKey(PersonExpedition,blank=True,null=True) # the person who writes it up doesn't have to have been on the trip.
# Re: the above- so this field should be "typist" or something, not "author". - AC 15 jun 09
#MJG wants to KILL THIS, as it is typically redundant with PersonTrip.is_logbook_entry_author; in the rare case it was not redundant and of actual interest, it could be added to the text.
title = models.CharField(max_length=settings.MAX_LOGBOOK_ENTRY_TITLE_LENGTH)
cave_slug = models.SlugField(max_length=50)
place = models.CharField(max_length=100,blank=True,null=True,help_text="Only use this if you haven't chosen a cave")
text = models.TextField()
slug = models.SlugField(max_length=50)
filename = models.CharField(max_length=200,null=True)
class Meta:
verbose_name_plural = "Logbook Entries"
# several PersonTrips point in to this object
ordering = ('-date',)
def __getattribute__(self, item):
if item == "cave": #Allow a logbookentries cave to be directly accessed despite not having a proper foreignkey
return CaveSlug.objects.get(slug = self.cave_slug).cave
return super(LogbookEntry, self).__getattribute__(item)
def __init__(self, *args, **kwargs):
if "cave" in kwargs.keys():
if kwargs["cave"] is not None:
kwargs["cave_slug"] = CaveSlug.objects.get(cave=kwargs["cave"], primary=True).slug
kwargs.pop("cave")
return super(LogbookEntry, self).__init__(*args, **kwargs)
def isLogbookEntry(self): # Function used in templates
return True
def get_absolute_url(self):
return urlparse.urljoin(settings.URL_ROOT, reverse('logbookentry',kwargs={'date':self.date,'slug':self.slug}))
def __unicode__(self):
return "%s: (%s)" % (self.date, self.title)
def get_next_by_id(self):
LogbookEntry.objects.get(id=self.id+1)
def get_previous_by_id(self):
LogbookEntry.objects.get(id=self.id-1)
def new_QM_number(self):
"""Returns """
if self.cave:
nextQMnumber=self.cave.new_QM_number(self.date.year)
else:
return none
return nextQMnumber
def new_QM_found_link(self):
"""Produces a link to a new QM with the next number filled in and this LogbookEntry set as 'found by' """
return settings.URL_ROOT + r'/admin/core/qm/add/?' + r'found_by=' + str(self.pk) +'&number=' + str(self.new_QM_number())
def DayIndex(self):
return list(self.expeditionday.logbookentry_set.all()).index(self)
#
# Single Person going on a trip, which may or may not be written up (accounts for different T/U for people in same logbook entry)
#
class PersonTrip(TroggleModel):
personexpedition = models.ForeignKey("PersonExpedition",null=True)
#expeditionday = models.ForeignKey("ExpeditionDay")#MJG wants to KILL THIS (redundant information)
#date = models.DateField() #MJG wants to KILL THIS (redundant information)
time_underground = models.FloatField(help_text="In decimal hours")
logbook_entry = models.ForeignKey(LogbookEntry)
is_logbook_entry_author = models.BooleanField(default=False)
# sequencing by person (difficult to solve locally)
#persontrip_next = models.ForeignKey('PersonTrip', related_name='pnext', blank=True,null=True)#MJG wants to KILL THIS (and use function persontrip_next_auto)
#persontrip_prev = models.ForeignKey('PersonTrip', related_name='pprev', blank=True,null=True)#MJG wants to KILL THIS (and use function persontrip_prev_auto)
def persontrip_next(self):
futurePTs = PersonTrip.objects.filter(personexpedition = self.personexpedition, logbook_entry__date__gt = self.logbook_entry.date).order_by('logbook_entry__date').all()
if len(futurePTs) > 0:
return futurePTs[0]
else:
return None
def persontrip_prev(self):
pastPTs = PersonTrip.objects.filter(personexpedition = self.personexpedition, logbook_entry__date__lt = self.logbook_entry.date).order_by('-logbook_entry__date').all()
if len(pastPTs) > 0:
return pastPTs[0]
else:
return None
def place(self):
return self.logbook_entry.cave and self.logbook_entry.cave or self.logbook_entry.place
def __unicode__(self):
return "%s (%s)" % (self.personexpedition, self.logbook_entry.date)
##########################################
# move following classes into models_cave
##########################################
class Area(TroggleModel):
short_name = models.CharField(max_length=100)
name = models.CharField(max_length=200, blank=True, null=True)
description = models.TextField(blank=True,null=True)
parent = models.ForeignKey('Area', blank=True, null=True)
def __unicode__(self):
if self.parent:
return unicode(self.parent) + u" - " + unicode(self.short_name)
else:
return unicode(self.short_name)
def kat_area(self):
if self.short_name in ["1623", "1626"]:
return self.short_name
elif self.parent:
return self.parent.kat_area()
class CaveAndEntrance(models.Model):
cave = models.ForeignKey('Cave')
entrance = models.ForeignKey('Entrance')
entrance_letter = models.CharField(max_length=20,blank=True,null=True)
def __unicode__(self):
return unicode(self.cave) + unicode(self.entrance_letter)
class CaveSlug(models.Model):
cave = models.ForeignKey('Cave')
slug = models.SlugField(max_length=50, unique = True)
primary = models.BooleanField(default=False)
class Cave(TroggleModel):
# too much here perhaps,
official_name = models.CharField(max_length=160)
area = models.ManyToManyField(Area, blank=True, null=True)
kataster_code = models.CharField(max_length=20,blank=True,null=True)
kataster_number = models.CharField(max_length=10,blank=True, null=True)
unofficial_number = models.CharField(max_length=60,blank=True, null=True)
entrances = models.ManyToManyField('Entrance', through='CaveAndEntrance')
explorers = models.TextField(blank=True,null=True)
underground_description = models.TextField(blank=True,null=True)
equipment = models.TextField(blank=True,null=True)
references = models.TextField(blank=True,null=True)
survey = models.TextField(blank=True,null=True)
kataster_status = models.TextField(blank=True,null=True)
underground_centre_line = models.TextField(blank=True,null=True)
notes = models.TextField(blank=True,null=True)
length = models.CharField(max_length=100,blank=True,null=True)
depth = models.CharField(max_length=100,blank=True,null=True)
extent = models.CharField(max_length=100,blank=True,null=True)
survex_file = models.CharField(max_length=100,blank=True,null=True)
description_file = models.CharField(max_length=200,blank=True,null=True)
url = models.CharField(max_length=200,blank=True,null=True)
filename = models.CharField(max_length=200)
#class Meta:
# unique_together = (("area", "kataster_number"), ("area", "unofficial_number"))
# FIXME Kataster Areas and CUCC defined sub areas need separating
#href = models.CharField(max_length=100)
class Meta:
ordering = ('kataster_code', 'unofficial_number')
def hassurvey(self):
if not self.underground_centre_line:
return "No"
if (self.survey.find("<img") > -1 or self.survey.find("<a") > -1 or self.survey.find("<IMG") > -1 or self.survey.find("<A") > -1):
return "Yes"
return "Missing"
def hassurveydata(self):
if not self.underground_centre_line:
return "No"
if self.survex_file:
return "Yes"
return "Missing"
def slug(self):
primarySlugs = self.caveslug_set.filter(primary = True)
if primarySlugs:
return primarySlugs[0].slug
else:
slugs = self.caveslug_set.filter()
if slugs:
return slugs[0].slug
def ours(self):
return bool(re.search(r'CUCC', self.explorers))
def reference(self):
if self.kataster_number:
return "%s-%s" % (self.kat_area(), self.kataster_number)
else:
return "%s-%s" % (self.kat_area(), self.unofficial_number)
def get_absolute_url(self):
if self.kataster_number:
href = self.kataster_number
elif self.unofficial_number:
href = self.unofficial_number
else:
href = official_name.lower()
#return settings.URL_ROOT + '/cave/' + href + '/'
return urlparse.urljoin(settings.URL_ROOT, reverse('cave',kwargs={'cave_id':href,}))
def __unicode__(self, sep = u": "):
return unicode(self.slug())
def get_QMs(self):
return QM.objects.filter(found_by__cave_slug=self.caveslug_set.all())
def new_QM_number(self, year=datetime.date.today().year):
"""Given a cave and the current year, returns the next QM number."""
try:
res=QM.objects.filter(found_by__date__year=year, found_by__cave=self).order_by('-number')[0]
except IndexError:
return 1
return res.number+1
def kat_area(self):
for a in self.area.all():
if a.kat_area():
return a.kat_area()
def entrances(self):
return CaveAndEntrance.objects.filter(cave=self)
def singleentrance(self):
return len(CaveAndEntrance.objects.filter(cave=self)) == 1
def entrancelist(self):
rs = []
res = ""
for e in CaveAndEntrance.objects.filter(cave=self):
rs.append(e.entrance_letter)
rs.sort()
prevR = None
n = 0
for r in rs:
if prevR:
if chr(ord(prevR) + 1 ) == r:
prevR = r
n += 1
else:
if n == 0:
res += ", " + prevR
else:
res += "&ndash;" + prevR
else:
prevR = r
n = 0
res += r
if n == 0:
res += ", " + prevR
else:
res += "&ndash;" + prevR
return res
def writeDataFile(self):
try:
f = open(os.path.join(settings.CAVEDESCRIPTIONS, self.filename), "w")
except:
subprocess.call(settings.FIX_PERMISSIONS)
f = open(os.path.join(settings.CAVEDESCRIPTIONS, self.filename), "w")
t = loader.get_template('dataformat/cave.xml')
c = Context({'cave': self})
u = t.render(c)
u8 = u.encode("utf-8")
f.write(u8)
f.close()
def getArea(self):
areas = self.area.all()
lowestareas = list(areas)
for area in areas:
if area.parent in areas:
try:
lowestareas.remove(area.parent)
except:
pass
return lowestareas[0]
def getCaveByReference(reference):
areaname, code = reference.split("-", 1)
print(areaname, code)
area = Area.objects.get(short_name = areaname)
print(area)
foundCaves = list(Cave.objects.filter(area = area, kataster_number = code).all()) + list(Cave.objects.filter(area = area, unofficial_number = code).all())
print(list(foundCaves))
assert len(foundCaves) == 1
return foundCaves[0]
class OtherCaveName(TroggleModel):
name = models.CharField(max_length=160)
cave = models.ForeignKey(Cave)
def __unicode__(self):
return unicode(self.name)
class EntranceSlug(models.Model):
entrance = models.ForeignKey('Entrance')
slug = models.SlugField(max_length=50, unique = True)
primary = models.BooleanField(default=False)
class Entrance(TroggleModel):
name = models.CharField(max_length=100, blank=True,null=True)
entrance_description = models.TextField(blank=True,null=True)
explorers = models.TextField(blank=True,null=True)
map_description = models.TextField(blank=True,null=True)
location_description = models.TextField(blank=True,null=True)
approach = models.TextField(blank=True,null=True)
underground_description = models.TextField(blank=True,null=True)
photo = models.TextField(blank=True,null=True)
MARKING_CHOICES = (
('P', 'Paint'),
('P?', 'Paint (?)'),
('T', 'Tag'),
('T?', 'Tag (?)'),
('R', 'Needs Retag'),
('S', 'Spit'),
('S?', 'Spit (?)'),
('U', 'Unmarked'),
('?', 'Unknown'))
marking = models.CharField(max_length=2, choices=MARKING_CHOICES)
marking_comment = models.TextField(blank=True,null=True)
FINDABLE_CHOICES = (
('?', 'To be confirmed ...'),
('S', 'Coordinates'),
('L', 'Lost'),
('R', 'Refindable'))
findability = models.CharField(max_length=1, choices=FINDABLE_CHOICES, blank=True, null=True)
findability_description = models.TextField(blank=True,null=True)
alt = models.TextField(blank=True, null=True)
northing = models.TextField(blank=True, null=True)
easting = models.TextField(blank=True, null=True)
tag_station = models.TextField(blank=True, null=True)
exact_station = models.TextField(blank=True, null=True)
other_station = models.TextField(blank=True, null=True)
other_description = models.TextField(blank=True,null=True)
bearings = models.TextField(blank=True,null=True)
url = models.CharField(max_length=200,blank=True,null=True)
filename = models.CharField(max_length=200)
cached_primary_slug = models.CharField(max_length=200,blank=True,null=True)
def __unicode__(self):
return unicode(self.slug())
def exact_location(self):
return SurvexStation.objects.lookup(self.exact_station)
def other_location(self):
return SurvexStation.objects.lookup(self.other_station)
def find_location(self):
r = {'': 'To be entered ',
'?': 'To be confirmed:',
'S': '',
'L': 'Lost:',
'R': 'Refindable:'}[self.findability]
if self.tag_station:
try:
s = SurvexStation.objects.lookup(self.tag_station)
return r + "%0.0fE %0.0fN %0.0fAlt" % (s.x, s.y, s.z)
except:
return r + "%s Tag Station not in dataset" % self.tag_station
if self.exact_station:
try:
s = SurvexStation.objects.lookup(self.exact_station)
return r + "%0.0fE %0.0fN %0.0fAlt" % (s.x, s.y, s.z)
except:
return r + "%s Exact Station not in dataset" % self.tag_station
if self.other_station:
try:
s = SurvexStation.objects.lookup(self.other_station)
return r + "%0.0fE %0.0fN %0.0fAlt %s" % (s.x, s.y, s.z, self.other_description)
except:
return r + "%s Other Station not in dataset" % self.tag_station
if self.findability == "S":
r += "ERROR, Entrance has been surveyed but has no survex point"
if self.bearings:
return r + self.bearings
return r
def best_station(self):
if self.tag_station:
return self.tag_station
if self.exact_station:
return self.exact_station
if self.other_station:
return self.other_station
def has_photo(self):
if self.photo:
if (self.photo.find("<img") > -1 or self.photo.find("<a") > -1 or self.photo.find("<IMG") > -1 or self.photo.find("<A") > -1):
return "Yes"
else:
return "Missing"
else:
return "No"
def marking_val(self):
for m in self.MARKING_CHOICES:
if m[0] == self.marking:
return m[1]
def findability_val(self):
for f in self.FINDABLE_CHOICES:
if f[0] == self.findability:
return f[1]
def tag(self):
return SurvexStation.objects.lookup(self.tag_station)
def needs_surface_work(self):
return self.findability != "S" or self.has_photo() != "Yes" or self.marking != "T"
def get_absolute_url(self):
ancestor_titles='/'.join([subcave.title for subcave in self.get_ancestors()])
if ancestor_titles:
res = '/'.join((self.get_root().cave.get_absolute_url(), ancestor_titles, self.title))
else:
res = '/'.join((self.get_root().cave.get_absolute_url(), self.title))
return res
def slug(self):
if not self.cached_primary_slug:
primarySlugs = self.entranceslug_set.filter(primary = True)
if primarySlugs:
self.cached_primary_slug = primarySlugs[0].slug
self.save()
else:
slugs = self.entranceslug_set.filter()
if slugs:
self.cached_primary_slug = slugs[0].slug
self.save()
return self.cached_primary_slug
def writeDataFile(self):
try:
f = open(os.path.join(settings.ENTRANCEDESCRIPTIONS, self.filename), "w")
except:
subprocess.call(settings.FIX_PERMISSIONS)
f = open(os.path.join(settings.ENTRANCEDESCRIPTIONS, self.filename), "w")
t = loader.get_template('dataformat/entrance.xml')
c = Context({'entrance': self})
u = t.render(c)
u8 = u.encode("utf-8")
f.write(u8)
f.close()
class CaveDescription(TroggleModel):
short_name = models.CharField(max_length=50, unique = True)
long_name = models.CharField(max_length=200, blank=True, null=True)
description = models.TextField(blank=True,null=True)
linked_subcaves = models.ManyToManyField("NewSubCave", blank=True,null=True)
linked_entrances = models.ManyToManyField("Entrance", blank=True,null=True)
linked_qms = models.ManyToManyField("QM", blank=True,null=True)
def __unicode__(self):
if self.long_name:
return unicode(self.long_name)
else:
return unicode(self.short_name)
def get_absolute_url(self):
return urlparse.urljoin(settings.URL_ROOT, reverse('cavedescription', args=(self.short_name,)))
def save(self):
"""
Overridden save method which stores wikilinks in text as links in database.
"""
super(CaveDescription, self).save()
qm_list=get_related_by_wikilinks(self.description)
for qm in qm_list:
self.linked_qms.add(qm)
super(CaveDescription, self).save()
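# Usage sketch of the wikilink round-trip (the QM reference is invented for illustration;
# the exact link format recognised here is whatever settings.QM_PATTERN matches):
#   desc = CaveDescription(short_name="example", description="Lead continues, see [[QM:204-1999-06]].")
#   desc.save()   # save() re-scans the text and adds each matched QM to linked_qms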
class NewSubCave(TroggleModel):
name = models.CharField(max_length=200, unique = True)
def __unicode__(self):
return unicode(self.name)
class QM(TroggleModel):
#based on qm.csv in trunk/expoweb/1623/204 which has the fields:
#"Number","Grade","Area","Description","Page reference","Nearest station","Completion description","Comment"
found_by = models.ForeignKey(LogbookEntry, related_name='QMs_found',blank=True, null=True )
ticked_off_by = models.ForeignKey(LogbookEntry, related_name='QMs_ticked_off',null=True,blank=True)
#cave = models.ForeignKey(Cave)
#expedition = models.ForeignKey(Expedition)
number = models.IntegerField(help_text="this is the sequential number in the year", )
GRADE_CHOICES=(
('A', 'A: Large obvious lead'),
('B', 'B: Average lead'),
('C', 'C: Tight unpromising lead'),
('D', 'D: Dig'),
('X', 'X: Unclimbable aven')
)
grade = models.CharField(max_length=1, choices=GRADE_CHOICES)
location_description = models.TextField(blank=True)
#should be a foreignkey to surveystation
nearest_station_description = models.CharField(max_length=400,null=True,blank=True)
nearest_station = models.CharField(max_length=200,blank=True,null=True)
area = models.CharField(max_length=100,blank=True,null=True)
completion_description = models.TextField(blank=True,null=True)
comment=models.TextField(blank=True,null=True)
def __unicode__(self):
return u"%s %s" % (self.code(), self.grade)
def code(self):
return u"%s-%s-%s" % (unicode(self.found_by.cave)[6:], self.found_by.date.year, self.number)
def get_absolute_url(self):
#return settings.URL_ROOT + '/cave/' + self.found_by.cave.kataster_number + '/' + str(self.found_by.date.year) + '-' + '%02d' %self.number
return urlparse.urljoin(settings.URL_ROOT, reverse('qm',kwargs={'cave_id':self.found_by.cave.kataster_number,'year':self.found_by.date.year,'qm_id':self.number,'grade':self.grade}))
def get_next_by_id(self):
return QM.objects.get(id=self.id+1)
def get_previous_by_id(self):
return QM.objects.get(id=self.id-1)
def wiki_link(self):
return u"%s%s%s" % ('[[QM:',self.code(),']]')
photoFileStorage = FileSystemStorage(location=settings.PHOTOS_ROOT, base_url=settings.PHOTOS_URL)
class DPhoto(TroggleImageModel):
caption = models.CharField(max_length=1000,blank=True,null=True)
contains_logbookentry = models.ForeignKey(LogbookEntry,blank=True,null=True)
contains_person = models.ManyToManyField(Person,blank=True,null=True)
file = models.ImageField(storage=photoFileStorage, upload_to='.',)
is_mugshot = models.BooleanField(default=False)
contains_cave = models.ForeignKey(Cave,blank=True,null=True)
contains_entrance = models.ForeignKey(Entrance, related_name="photo_file",blank=True,null=True)
#nearest_survey_point = models.ForeignKey(SurveyStation,blank=True,null=True)
nearest_QM = models.ForeignKey(QM,blank=True,null=True)
lon_utm = models.FloatField(blank=True,null=True)
lat_utm = models.FloatField(blank=True,null=True)
class IKOptions:
spec_module = 'core.imagekit_specs'
cache_dir = 'thumbs'
image_field = 'file'
#content_type = models.ForeignKey(ContentType)
#object_id = models.PositiveIntegerField()
#location = generic.GenericForeignKey('content_type', 'object_id')
def __unicode__(self):
return self.caption
scansFileStorage = FileSystemStorage(location=settings.SURVEY_SCANS, base_url=settings.SURVEYS_URL)
def get_scan_path(instance, filename):
year=instance.survey.expedition.year
#print("WN: ", type(instance.survey.wallet_number), instance.survey.wallet_number, instance.survey.wallet_letter)
number=str(instance.survey.wallet_number)
if str(instance.survey.wallet_letter) != "None":
number=str(instance.survey.wallet_letter) + number #concatenate as strings because the convention is 2009#01 or 2009#X01
return os.path.join('./',year,year+r'#'+number,str(instance.contents)+str(instance.number_in_wallet)+r'.jpg')
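# Worked example of the path get_scan_path() builds (values invented for illustration):
#   survey: expedition.year='2009', wallet_letter='X', wallet_number=12
#   image:  contents='notes', number_in_wallet=3
#   result: './2009/2009#X12/notes3.jpg'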
class ScannedImage(TroggleImageModel):
file = models.ImageField(storage=scansFileStorage, upload_to=get_scan_path)
scanned_by = models.ForeignKey(Person,blank=True, null=True)
scanned_on = models.DateField(null=True)
survey = models.ForeignKey('Survey')
contents = models.CharField(max_length=20,choices=(('notes','notes'),('plan','plan_sketch'),('elevation','elevation_sketch')))
number_in_wallet = models.IntegerField(null=True)
lon_utm = models.FloatField(blank=True,null=True)
lat_utm = models.FloatField(blank=True,null=True)
class IKOptions:
spec_module = 'core.imagekit_specs'
cache_dir = 'thumbs'
image_field = 'file'
#content_type = models.ForeignKey(ContentType)
#object_id = models.PositiveIntegerField()
#location = generic.GenericForeignKey('content_type', 'object_id')
#This is an ugly hack to deal with the #s in our survey scan paths. The correct thing is to write a custom file storage backend which calls urlencode on the name for making file.url but not file.path.
def correctURL(self):
return string.replace(self.file.url,r'#',r'%23')
def __unicode__(self):
return get_scan_path(self,'')
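# A minimal sketch of the custom storage backend suggested in the comment above
# (class name invented; not part of troggle). It URL-encodes '#' when building
# file.url while leaving file.path untouched:
class HashSafeFileSystemStorage(FileSystemStorage):
    def url(self, name):
        # quote the '#' that wallet names such as 2009#X01 put into scan paths
        return super(HashSafeFileSystemStorage, self).url(name).replace('#', '%23')
# scansFileStorage could then be built as:
#   scansFileStorage = HashSafeFileSystemStorage(location=settings.SURVEY_SCANS, base_url=settings.SURVEYS_URL)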
class Survey(TroggleModel):
expedition = models.ForeignKey('Expedition') #REDUNDANT (logbook_entry)
wallet_number = models.IntegerField(blank=True,null=True)
wallet_letter = models.CharField(max_length=1,blank=True,null=True)
comments = models.TextField(blank=True,null=True)
location = models.CharField(max_length=400,blank=True,null=True) #REDUNDANT
subcave = models.ForeignKey('NewSubCave', blank=True, null=True)
#notes_scan = models.ForeignKey('ScannedImage',related_name='notes_scan',blank=True, null=True) #Replaced by contents field of ScannedImage model
survex_block = models.OneToOneField('SurvexBlock',blank=True, null=True)
logbook_entry = models.ForeignKey('LogbookEntry')
centreline_printed_on = models.DateField(blank=True, null=True)
centreline_printed_by = models.ForeignKey('Person',related_name='centreline_printed_by',blank=True,null=True)
#sketch_scan = models.ForeignKey(ScannedImage,blank=True, null=True) #Replaced by contents field of ScannedImage model
tunnel_file = models.FileField(upload_to='surveyXMLfiles',blank=True, null=True)
tunnel_main_sketch = models.ForeignKey('Survey',blank=True,null=True)
integrated_into_main_sketch_on = models.DateField(blank=True,null=True)
integrated_into_main_sketch_by = models.ForeignKey('Person' ,related_name='integrated_into_main_sketch_by', blank=True,null=True)
rendered_image = models.ImageField(upload_to='renderedSurveys',blank=True,null=True)
def __unicode__(self):
return self.expedition.year+"#"+"%02d" % int(self.wallet_number)
def notes(self):
return self.scannedimage_set.filter(contents='notes')
def plans(self):
return self.scannedimage_set.filter(contents='plan')
def elevations(self):
return self.scannedimage_set.filter(contents='elevation')

View File

@@ -1,83 +0,0 @@
from django.db import models
from django.conf import settings
import troggle.core.methods_millenial as methods_millenial
#
# This file was created in 2019
# It's a result of massive frustration with the cluttered database of troggle
# Maximal clarity of code was the primary goal (the previous code had very few comments)
# Maximal speed of database rebuild was the secondary goal
#
#
# The following file will tell you what fields and methods are available inside this database
# be careful, you might miss some! ManyToMany fields can be used from the far end as well
#
#
# Naming conventions:
# (Upper/lower convention)
# Class names are written Udddd_ddd_dddM - they finish with M for backwards compatibility
# Fields/methods are written lower_lower_lower
#
class PersonM(models.Model): #instance of this class corresponds to one physical person
name = models.CharField(max_length=100) #just name, talk to wookey if you disagree
surveys_made = models.ManyToManyField('SurveyM', related_name='people_surveyed') #links to survey objects that this person made (made=:survex says so)
expos_attended = models.ManyToManyField('ExpeditionM', related_name='people_attended') #expos attended by this person (attended=:folk.csv says so)
logbook_entries_written = models.ManyToManyField('Logbook_entryM', related_name='people_wrote') #links to logbook chunks created by a person
class CaveM(models.Model): #instance of this class corresponds to one 'thing' that people call cave
entrance = models.CharField(max_length=100) #UTM string describing ONE(!) entrance. Purpose = findability
title = models.TextField() #title given to the topmost survey in survex, numeric name otherwise c.f. name (e.g. 'Fishface')
name = models.TextField() #name given to the topmost survey in survex (e.g. '2017-cucc-28')
surveys = models.ManyToManyField('SurveyM', related_name='cave_parent') #links to surveys objects that this cave contains
survex_file = models.TextField() #gives path to top level survex file
total_length = models.FloatField() #holds total length of this cave (as given by cavern)
total_depth = models.FloatField() #holds total depth of this cave (as given by cavern)
description = models.TextField() #holds link to description
date = models.TextField() #holds date of last visit
def top_camp_distance(self): #returns distance of this cave from topcamp
return methods_millenial.top_camp_distance(self.entrance)
def top_camp_bearing(self): #returns bearing to this cave from topcamp in format 235.5 (float north-based azimuth)
return methods_millenial.top_camp_bearing(self.entrance)
def top_camp_bearing_letter(self): #returns bearing to this cave from topcamp in format e.g. 'NE'
return methods_millenial.top_camp_bearing_letter(self.entrance)
def lat_lon_entrance(self): #lat_lon entrance location
return methods_millenial.lat_lon_entrance(self.entrance)
class Cave_descriptionM(models.Model): #instance of this class corresponds to each of the .html files in descriptions
#each of those holds one XML field
slug = models.TextField()
explorers = models.TextField()
underground_description = models.TextField()
equipment = models.TextField()
references = models.TextField()
survey = models.TextField()
kataster_status = models.TextField()
underground_centre_line = models.TextField()
survex_file = models.TextField() #as given in .html file
notes = models.TextField()
class ExpeditionM(models.Model): #instance of this class corresponds to one expo (usually one year)
date = models.CharField(max_length=100) #date in format YYYY.MM.DD-YYYY.MM.DD
class SurveyM(models.Model): #instance of this class corresponds to one .svx file - one trip
date = models.CharField(max_length=100) #date of the trip in format YYYY.MM.DD (dated:=date given by .svx file)
survex_file = models.TextField()
class Logbook_entryM(models.Model): #instance of this class corresponds to one bit of logbook (c.f. expo.survex.com/years/2015/logbook.html or similar)
date = models.CharField(max_length=100) #date as typed into logbook
contents = models.TextField() #contents of the logbook chunk
class Parser_messageM(models.Model): #instance of this class contains one error or warning message produced by any of the parsers
parsername = models.CharField(max_length = 20) #name of parser
content = models.TextField() #content of message
message_type = models.CharField(max_length = 10) # [Error,Info] or similar
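# Sketch of the "far end" ManyToMany access mentioned in the header comment
# (object values invented; accessor names follow the related_name declarations above):
#   p = PersonM.objects.get(name='Some Caver')
#   p.surveys_made.all()        # forward: surveys this person made
#   s = SurveyM.objects.get(survex_file='caves-1623/example.svx')
#   s.people_surveyed.all()     # reverse, via related_name on PersonM.surveys_made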

View File

@@ -1,862 +0,0 @@
import urllib, urlparse, string, os, datetime, logging, re
import subprocess
from django.forms import ModelForm
from django.db import models
from django.contrib import admin
from django.core.files.storage import FileSystemStorage
from django.contrib.auth.models import User
from django.contrib.contenttypes.models import ContentType
from django.db.models import Min, Max
from django.conf import settings
from decimal import Decimal, getcontext
from django.core.urlresolvers import reverse
from imagekit.models import ImageModel
from django.template import Context, loader
import settings
getcontext().prec=2 #use 2 significant figures for decimal calculations
def get_related_by_wikilinks(wiki_text):
found=re.findall(settings.QM_PATTERN,wiki_text)
res=[]
for wikilink in found:
qmdict={'urlroot':settings.URL_ROOT,'cave':wikilink[2],'year':wikilink[1],'number':wikilink[3]}
try:
cave_slugs = CaveSlug.objects.filter(cave__kataster_number = qmdict['cave'])
qm=QM.objects.get(found_by__cave_slug__in = cave_slugs,
found_by__date__year = qmdict['year'],
number = qmdict['number'])
res.append(qm)
except QM.DoesNotExist:
print('fail on '+str(wikilink))
return res
try:
logging.basicConfig(level=logging.DEBUG,
filename=settings.LOGFILE,
filemode='w')
except:
subprocess.call(settings.FIX_PERMISSIONS)
logging.basicConfig(level=logging.DEBUG,
filename=settings.LOGFILE,
filemode='w')
#This class is for adding fields and methods which all of our models will have.
class TroggleModel(models.Model):
new_since_parsing = models.BooleanField(default=False, editable=False)
non_public = models.BooleanField(default=False)
def object_name(self):
return self._meta.object_name
def get_admin_url(self):
return urlparse.urljoin(settings.URL_ROOT, "/admin/core/" + self.object_name().lower() + "/" + str(self.pk))
class Meta:
abstract = True
class TroggleImageModel(ImageModel):
new_since_parsing = models.BooleanField(default=False, editable=False)
def object_name(self):
return self._meta.object_name
def get_admin_url(self):
return urlparse.urljoin(settings.URL_ROOT, "/admin/core/" + self.object_name().lower() + "/" + str(self.pk))
class Meta:
abstract = True
#
# single Expedition, usually seen by year
#
class Expedition(TroggleModel):
year = models.CharField(max_length=20, unique=True)
name = models.CharField(max_length=100)
def __unicode__(self):
return self.year
class Meta:
ordering = ('-year',)
get_latest_by = 'year'
def get_absolute_url(self):
return urlparse.urljoin(settings.URL_ROOT, reverse('expedition', args=[self.year]))
# construction function. should be moved out
def get_expedition_day(self, date):
expeditiondays = self.expeditionday_set.filter(date=date)
if expeditiondays:
assert len(expeditiondays) == 1
return expeditiondays[0]
res = ExpeditionDay(expedition=self, date=date)
res.save()
return res
def day_min(self):
res = self.expeditionday_set.all()
return res and res[0] or None
def day_max(self):
res = self.expeditionday_set.all()
return res and res[len(res) - 1] or None
class ExpeditionDay(TroggleModel):
expedition = models.ForeignKey("Expedition")
date = models.DateField()
class Meta:
ordering = ('date',)
def GetPersonTrip(self, personexpedition):
personexpeditions = self.persontrip_set.filter(expeditionday=self)
return personexpeditions and personexpeditions[0] or None
#
# single Person, can go on many years
#
class Person(TroggleModel):
first_name = models.CharField(max_length=100)
last_name = models.CharField(max_length=100)
is_vfho = models.BooleanField(help_text="VFHO is the Vereines f&uuml;r H&ouml;hlenkunde in Obersteier, a nearby Austrian caving club.", default=False)
mug_shot = models.CharField(max_length=100, blank=True,null=True)
blurb = models.TextField(blank=True,null=True)
#href = models.CharField(max_length=200)
orderref = models.CharField(max_length=200) # for alphabetic
#the below have been removed and made methods. I'm not sure what the b in bisnotable stands for. - AC 16 Feb
#notability = models.FloatField() # for listing the top 20 people
#bisnotable = models.BooleanField(default=False)
user = models.OneToOneField(User, null=True, blank=True)
def get_absolute_url(self):
return urlparse.urljoin(settings.URL_ROOT,reverse('person',kwargs={'first_name':self.first_name,'last_name':self.last_name}))
class Meta:
verbose_name_plural = "People"
ordering = ('orderref',) # "Wookey" makes too complex for: ('last_name', 'first_name')
def __unicode__(self):
if self.last_name:
return "%s %s" % (self.first_name, self.last_name)
return self.first_name
def notability(self):
notability = Decimal(0)
for personexpedition in self.personexpedition_set.all():
if not personexpedition.is_guest:
notability += Decimal(1) / (2012 - int(personexpedition.expedition.year))
return notability
def bisnotable(self):
return self.notability() > Decimal(1)/Decimal(3)
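# Worked example of the weighting above (attendance years invented):
#   non-guest expos in 2010 and 2011 give
#   notability = 1/(2012-2010) + 1/(2012-2011) = 0.5 + 1.0 = 1.5
#   so bisnotable() is True, since 1.5 > 1/3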
def surveyedleglength(self):
return sum([personexpedition.surveyedleglength() for personexpedition in self.personexpedition_set.all()])
def first(self):
return self.personexpedition_set.order_by('-expedition')[0]
def last(self):
return self.personexpedition_set.order_by('expedition')[0]
#def Sethref(self):
#if self.last_name:
#self.href = self.first_name.lower() + "_" + self.last_name.lower()
#self.orderref = self.last_name + " " + self.first_name
#else:
# self.href = self.first_name.lower()
#self.orderref = self.first_name
#self.notability = 0.0 # set temporarily
#
# Person's attendance at one Expo
#
class PersonExpedition(TroggleModel):
expedition = models.ForeignKey(Expedition)
person = models.ForeignKey(Person)
slugfield = models.SlugField(max_length=50,blank=True,null=True)
is_guest = models.BooleanField(default=False)
COMMITTEE_CHOICES = (
('leader','Expo leader'),
('medical','Expo medical officer'),
('treasurer','Expo treasurer'),
('sponsorship','Expo sponsorship coordinator'),
('research','Expo research coordinator'),
)
expo_committee_position = models.CharField(blank=True,null=True,choices=COMMITTEE_CHOICES,max_length=200)
nickname = models.CharField(max_length=100,blank=True,null=True)
def GetPersonroles(self):
res = [ ]
for personrole in self.personrole_set.order_by('survexblock'):
if res and res[-1]['survexpath'] == personrole.survexblock.survexpath:
res[-1]['roles'] += ", " + str(personrole.role)
else:
res.append({'date':personrole.survexblock.date, 'survexpath':personrole.survexblock.survexpath, 'roles':str(personrole.role)})
return res
class Meta:
ordering = ('-expedition',)
#order_with_respect_to = 'expedition'
def __unicode__(self):
return "%s: (%s)" % (self.person, self.expedition)
#why is the below a function in personexpedition, rather than in person? - AC 14 Feb 09
def name(self):
if self.nickname:
return "%s (%s) %s" % (self.person.first_name, self.nickname, self.person.last_name)
if self.person.last_name:
return "%s %s" % (self.person.first_name, self.person.last_name)
return self.person.first_name
def get_absolute_url(self):
return urlparse.urljoin(settings.URL_ROOT, reverse('personexpedition',kwargs={'first_name':self.person.first_name,'last_name':self.person.last_name,'year':self.expedition.year}))
def surveyedleglength(self):
survexblocks = [personrole.survexblock for personrole in self.personrole_set.all() ]
return sum([survexblock.totalleglength for survexblock in set(survexblocks)])
# would prefer to return actual person trips so we could link to first and last ones
def day_min(self):
res = self.persontrip_set.aggregate(day_min=Min("expeditionday__date"))
return res["day_min"]
def day_max(self):
res = self.persontrip_set.all().aggregate(day_max=Max("expeditionday__date"))
return res["day_max"]
#
# Single parsed entry from Logbook
#
class LogbookEntry(TroggleModel):
date = models.DateField()#MJG wants to turn this into a datetime such that multiple Logbook entries on the same day can be ordered.
expeditionday = models.ForeignKey("ExpeditionDay", null=True)#MJG wants to KILL THIS (redundant information)
expedition = models.ForeignKey(Expedition,blank=True,null=True) # yes this is double-
#author = models.ForeignKey(PersonExpedition,blank=True,null=True) # the person who writes it up doesn't have to have been on the trip.
# Re: the above- so this field should be "typist" or something, not "author". - AC 15 jun 09
#MJG wants to KILL THIS, as it is typically redundant with PersonTrip.is_logbook_entry_author; in the rare case it was not redundant and of actual interest it could be added to the text.
title = models.CharField(max_length=settings.MAX_LOGBOOK_ENTRY_TITLE_LENGTH)
cave_slug = models.SlugField(max_length=50)
place = models.CharField(max_length=100,blank=True,null=True,help_text="Only use this if you haven't chosen a cave")
text = models.TextField()
slug = models.SlugField(max_length=50)
filename = models.CharField(max_length=200,null=True)
class Meta:
verbose_name_plural = "Logbook Entries"
# several PersonTrips point in to this object
ordering = ('-date',)
def __getattribute__(self, item):
if item == "cave": #Allow a logbookentries cave to be directly accessed despite not having a proper foreignkey
return CaveSlug.objects.get(slug = self.cave_slug).cave
return super(LogbookEntry, self).__getattribute__(item)
def __init__(self, *args, **kwargs):
if "cave" in kwargs.keys():
if kwargs["cave"] is not None:
kwargs["cave_slug"] = CaveSlug.objects.get(cave=kwargs["cave"], primary=True).slug
kwargs.pop("cave")
return super(LogbookEntry, self).__init__(*args, **kwargs)
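# Sketch of how this cave/cave_slug indirection behaves (objects invented):
#   entry = LogbookEntry(date=d, title="...", cave=some_cave)  # __init__ swaps cave= for its primary cave_slug
#   entry.cave                                                  # __getattribute__ resolves the slug back to the Cave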
def isLogbookEntry(self): # Function used in templates
return True
def get_absolute_url(self):
return urlparse.urljoin(settings.URL_ROOT, reverse('logbookentry',kwargs={'date':self.date,'slug':self.slug}))
def __unicode__(self):
return "%s: (%s)" % (self.date, self.title)
def get_next_by_id(self):
return LogbookEntry.objects.get(id=self.id+1)
def get_previous_by_id(self):
return LogbookEntry.objects.get(id=self.id-1)
def new_QM_number(self):
"""Returns """
if self.cave:
nextQMnumber=self.cave.new_QM_number(self.date.year)
else:
return none
return nextQMnumber
def new_QM_found_link(self):
"""Produces a link to a new QM with the next number filled in and this LogbookEntry set as 'found by' """
return settings.URL_ROOT + r'/admin/core/qm/add/?' + r'found_by=' + str(self.pk) +'&number=' + str(self.new_QM_number())
def DayIndex(self):
return list(self.expeditionday.logbookentry_set.all()).index(self)
#
# Single Person going on a trip, which may or may not be written up (accounts for different T/U for people in same logbook entry)
#
class PersonTrip(TroggleModel):
personexpedition = models.ForeignKey("PersonExpedition",null=True)
#expeditionday = models.ForeignKey("ExpeditionDay")#MJG wants to KILL THIS (redundant information)
#date = models.DateField() #MJG wants to KILL THIS (redundant information)
time_underground = models.FloatField(help_text="In decimal hours")
logbook_entry = models.ForeignKey(LogbookEntry)
is_logbook_entry_author = models.BooleanField(default=False)
# sequencing by person (difficult to solve locally)
#persontrip_next = models.ForeignKey('PersonTrip', related_name='pnext', blank=True,null=True)#MJG wants to KILL THIS (and use function persontrip_next_auto)
#persontrip_prev = models.ForeignKey('PersonTrip', related_name='pprev', blank=True,null=True)#MJG wants to KILL THIS (and use function persontrip_prev_auto)
def persontrip_next(self):
futurePTs = PersonTrip.objects.filter(personexpedition = self.personexpedition, logbook_entry__date__gt = self.logbook_entry.date).order_by('logbook_entry__date').all()
if len(futurePTs) > 0:
return futurePTs[0]
else:
return None
def persontrip_prev(self):
pastPTs = PersonTrip.objects.filter(personexpedition = self.personexpedition, logbook_entry__date__lt = self.logbook_entry.date).order_by('-logbook_entry__date').all()
if len(pastPTs) > 0:
return pastPTs[0]
else:
return None
def place(self):
return self.logbook_entry.cave and self.logbook_entry.cave or self.logbook_entry.place
def __unicode__(self):
return "%s (%s)" % (self.personexpedition, self.logbook_entry.date)
##########################################
# move following classes into models_cave
##########################################
class Area(TroggleModel):
short_name = models.CharField(max_length=100)
name = models.CharField(max_length=200, blank=True, null=True)
description = models.TextField(blank=True,null=True)
parent = models.ForeignKey('Area', blank=True, null=True)
def __unicode__(self):
if self.parent:
return unicode(self.parent) + u" - " + unicode(self.short_name)
else:
return unicode(self.short_name)
def kat_area(self):
if self.short_name in ["1623", "1626"]:
return self.short_name
elif self.parent:
return self.parent.kat_area()
class CaveAndEntrance(models.Model):
cave = models.ForeignKey('Cave')
entrance = models.ForeignKey('Entrance')
entrance_letter = models.CharField(max_length=20,blank=True,null=True)
def __unicode__(self):
return unicode(self.cave) + unicode(self.entrance_letter)
class CaveSlug(models.Model):
cave = models.ForeignKey('Cave')
slug = models.SlugField(max_length=50, unique = True)
primary = models.BooleanField(default=False)
class Cave(TroggleModel):
# too much here perhaps,
official_name = models.CharField(max_length=160)
area = models.ManyToManyField(Area, blank=True, null=True)
kataster_code = models.CharField(max_length=20,blank=True,null=True)
kataster_number = models.CharField(max_length=10,blank=True, null=True)
unofficial_number = models.CharField(max_length=60,blank=True, null=True)
entrances = models.ManyToManyField('Entrance', through='CaveAndEntrance')
explorers = models.TextField(blank=True,null=True)
underground_description = models.TextField(blank=True,null=True)
equipment = models.TextField(blank=True,null=True)
references = models.TextField(blank=True,null=True)
survey = models.TextField(blank=True,null=True)
kataster_status = models.TextField(blank=True,null=True)
underground_centre_line = models.TextField(blank=True,null=True)
notes = models.TextField(blank=True,null=True)
length = models.CharField(max_length=100,blank=True,null=True)
depth = models.CharField(max_length=100,blank=True,null=True)
extent = models.CharField(max_length=100,blank=True,null=True)
survex_file = models.CharField(max_length=100,blank=True,null=True)
description_file = models.CharField(max_length=200,blank=True,null=True)
url = models.CharField(max_length=200,blank=True,null=True)
filename = models.CharField(max_length=200)
#class Meta:
# unique_together = (("area", "kataster_number"), ("area", "unofficial_number"))
# FIXME Kataster Areas and CUCC defined sub areas need separating
#href = models.CharField(max_length=100)
class Meta:
ordering = ('kataster_code', 'unofficial_number')
def hassurvey(self):
if not self.underground_centre_line:
return "No"
if (self.survey.find("<img") > -1 or self.survey.find("<a") > -1 or self.survey.find("<IMG") > -1 or self.survey.find("<A") > -1):
return "Yes"
return "Missing"
def hassurveydata(self):
if not self.underground_centre_line:
return "No"
if self.survex_file:
return "Yes"
return "Missing"
def slug(self):
primarySlugs = self.caveslug_set.filter(primary = True)
if primarySlugs:
return primarySlugs[0].slug
else:
slugs = self.caveslug_set.filter()
if slugs:
return slugs[0].slug
def ours(self):
return bool(re.search(r'CUCC', self.explorers))
def reference(self):
if self.kataster_number:
return "%s-%s" % (self.kat_area(), self.kataster_number)
else:
return "%s-%s" % (self.kat_area(), self.unofficial_number)
def get_absolute_url(self):
if self.kataster_number:
href = self.kataster_number
elif self.unofficial_number:
href = self.unofficial_number
else:
href = self.official_name.lower()
#return settings.URL_ROOT + '/cave/' + href + '/'
return urlparse.urljoin(settings.URL_ROOT, reverse('cave',kwargs={'cave_id':href,}))
def __unicode__(self, sep = u": "):
return unicode(self.slug())
def get_QMs(self):
return QM.objects.filter(found_by__cave_slug=self.caveslug_set.all())
def new_QM_number(self, year=datetime.date.today().year):
"""Given a cave and the current year, returns the next QM number."""
try:
res=QM.objects.filter(found_by__date__year=year, found_by__cave=self).order_by('-number')[0]
except IndexError:
return 1
return res.number+1
def kat_area(self):
for a in self.area.all():
if a.kat_area():
return a.kat_area()
def entrances(self):
return CaveAndEntrance.objects.filter(cave=self)
def singleentrance(self):
return len(CaveAndEntrance.objects.filter(cave=self)) == 1
def entrancelist(self):
rs = []
res = ""
for e in CaveAndEntrance.objects.filter(cave=self):
rs.append(e.entrance_letter)
rs.sort()
prevR = None
n = 0
for r in rs:
if prevR:
if chr(ord(prevR) + 1 ) == r:
# r continues the current run of consecutive letters
prevR = r
n += 1
else:
# close the current run, then start a new one at r
if n > 0:
res += "&ndash;" + prevR
res += ", " + r
prevR = r
n = 0
else:
# first letter
prevR = r
n = 0
res += r
if n > 0:
res += "&ndash;" + prevR
return res
def writeDataFile(self):
try:
f = open(os.path.join(settings.CAVEDESCRIPTIONS, self.filename), "w")
except:
subprocess.call(settings.FIX_PERMISSIONS)
f = open(os.path.join(settings.CAVEDESCRIPTIONS, self.filename), "w")
t = loader.get_template('dataformat/cave.xml')
c = Context({'cave': self})
u = t.render(c)
u8 = u.encode("utf-8")
f.write(u8)
f.close()
def getArea(self):
areas = self.area.all()
lowestareas = list(areas)
for area in areas:
if area.parent in areas:
try:
lowestareas.remove(area.parent)
except:
pass
return lowestareas[0]
def getCaveByReference(reference):
areaname, code = reference.split("-", 1)
print(areaname, code)
area = Area.objects.get(short_name = areaname)
print(area)
foundCaves = list(Cave.objects.filter(area = area, kataster_number = code).all()) + list(Cave.objects.filter(area = area, unofficial_number = code).all())
print(list(foundCaves))
assert len(foundCaves) == 1
return foundCaves[0]
class OtherCaveName(TroggleModel):
name = models.CharField(max_length=160)
cave = models.ForeignKey(Cave)
def __unicode__(self):
return unicode(self.name)
class EntranceSlug(models.Model):
entrance = models.ForeignKey('Entrance')
slug = models.SlugField(max_length=50, unique = True)
primary = models.BooleanField(default=False)
class Entrance(TroggleModel):
name = models.CharField(max_length=100, blank=True,null=True)
entrance_description = models.TextField(blank=True,null=True)
explorers = models.TextField(blank=True,null=True)
map_description = models.TextField(blank=True,null=True)
location_description = models.TextField(blank=True,null=True)
approach = models.TextField(blank=True,null=True)
underground_description = models.TextField(blank=True,null=True)
photo = models.TextField(blank=True,null=True)
MARKING_CHOICES = (
('P', 'Paint'),
('P?', 'Paint (?)'),
('T', 'Tag'),
('T?', 'Tag (?)'),
('R', 'Needs Retag'),
('S', 'Spit'),
('S?', 'Spit (?)'),
('U', 'Unmarked'),
('?', 'Unknown'))
marking = models.CharField(max_length=2, choices=MARKING_CHOICES)
marking_comment = models.TextField(blank=True,null=True)
FINDABLE_CHOICES = (
('?', 'To be confirmed ...'),
('S', 'Coordinates'),
('L', 'Lost'),
('R', 'Refindable'))
findability = models.CharField(max_length=1, choices=FINDABLE_CHOICES, blank=True, null=True)
findability_description = models.TextField(blank=True,null=True)
alt = models.TextField(blank=True, null=True)
northing = models.TextField(blank=True, null=True)
easting = models.TextField(blank=True, null=True)
tag_station = models.TextField(blank=True, null=True)
exact_station = models.TextField(blank=True, null=True)
other_station = models.TextField(blank=True, null=True)
other_description = models.TextField(blank=True,null=True)
bearings = models.TextField(blank=True,null=True)
url = models.CharField(max_length=200,blank=True,null=True)
filename = models.CharField(max_length=200)
cached_primary_slug = models.CharField(max_length=200,blank=True,null=True)
def __unicode__(self):
return unicode(self.slug())
def exact_location(self):
return SurvexStation.objects.lookup(self.exact_station)
def other_location(self):
return SurvexStation.objects.lookup(self.other_station)
def find_location(self):
r = {'': 'To be entered ',
'?': 'To be confirmed:',
'S': '',
'L': 'Lost:',
'R': 'Refindable:'}[self.findability]
if self.tag_station:
try:
s = SurvexStation.objects.lookup(self.tag_station)
return r + "%0.0fE %0.0fN %0.0fAlt" % (s.x, s.y, s.z)
except:
return r + "%s Tag Station not in dataset" % self.tag_station
if self.exact_station:
try:
s = SurvexStation.objects.lookup(self.exact_station)
return r + "%0.0fE %0.0fN %0.0fAlt" % (s.x, s.y, s.z)
except:
return r + "%s Exact Station not in dataset" % self.tag_station
if self.other_station:
try:
s = SurvexStation.objects.lookup(self.other_station)
return r + "%0.0fE %0.0fN %0.0fAlt %s" % (s.x, s.y, s.z, self.other_description)
except:
return r + "%s Other Station not in dataset" % self.tag_station
if self.findability == "S":
r += "ERROR, Entrance has been surveyed but has no survex point"
if self.bearings:
return r + self.bearings
return r
def best_station(self):
if self.tag_station:
return self.tag_station
if self.exact_station:
return self.exact_station
if self.other_station:
return self.other_station
def has_photo(self):
if self.photo:
if (self.photo.find("<img") > -1 or self.photo.find("<a") > -1 or self.photo.find("<IMG") > -1 or self.photo.find("<A") > -1):
return "Yes"
else:
return "Missing"
else:
return "No"
def marking_val(self):
for m in self.MARKING_CHOICES:
if m[0] == self.marking:
return m[1]
def findability_val(self):
for f in self.FINDABLE_CHOICES:
if f[0] == self.findability:
return f[1]
def tag(self):
return SurvexStation.objects.lookup(self.tag_station)
def needs_surface_work(self):
return self.findability != "S" or self.has_photo() != "Yes" or self.marking != "T"
def get_absolute_url(self):
ancestor_titles='/'.join([subcave.title for subcave in self.get_ancestors()])
if ancestor_titles:
res = '/'.join((self.get_root().cave.get_absolute_url(), ancestor_titles, self.title))
else:
res = '/'.join((self.get_root().cave.get_absolute_url(), self.title))
return res
def slug(self):
if not self.cached_primary_slug:
primarySlugs = self.entranceslug_set.filter(primary = True)
if primarySlugs:
self.cached_primary_slug = primarySlugs[0].slug
self.save()
else:
slugs = self.entranceslug_set.filter()
if slugs:
self.cached_primary_slug = slugs[0].slug
self.save()
return self.cached_primary_slug
def writeDataFile(self):
try:
f = open(os.path.join(settings.ENTRANCEDESCRIPTIONS, self.filename), "w")
except:
subprocess.call(settings.FIX_PERMISSIONS)
f = open(os.path.join(settings.ENTRANCEDESCRIPTIONS, self.filename), "w")
t = loader.get_template('dataformat/entrance.xml')
c = Context({'entrance': self})
u = t.render(c)
u8 = u.encode("utf-8")
f.write(u8)
f.close()
class CaveDescription(TroggleModel):
short_name = models.CharField(max_length=50, unique = True)
long_name = models.CharField(max_length=200, blank=True, null=True)
description = models.TextField(blank=True,null=True)
linked_subcaves = models.ManyToManyField("NewSubCave", blank=True,null=True)
linked_entrances = models.ManyToManyField("Entrance", blank=True,null=True)
linked_qms = models.ManyToManyField("QM", blank=True,null=True)
def __unicode__(self):
if self.long_name:
return unicode(self.long_name)
else:
return unicode(self.short_name)
def get_absolute_url(self):
return urlparse.urljoin(settings.URL_ROOT, reverse('cavedescription', args=(self.short_name,)))
def save(self):
"""
Overridden save method which stores wikilinks in text as links in database.
"""
super(CaveDescription, self).save()
qm_list=get_related_by_wikilinks(self.description)
for qm in qm_list:
self.linked_qms.add(qm)
super(CaveDescription, self).save()
class NewSubCave(TroggleModel):
name = models.CharField(max_length=200, unique = True)
def __unicode__(self):
return unicode(self.name)
class QM(TroggleModel):
#based on qm.csv in trunk/expoweb/1623/204 which has the fields:
#"Number","Grade","Area","Description","Page reference","Nearest station","Completion description","Comment"
found_by = models.ForeignKey(LogbookEntry, related_name='QMs_found',blank=True, null=True )
ticked_off_by = models.ForeignKey(LogbookEntry, related_name='QMs_ticked_off',null=True,blank=True)
#cave = models.ForeignKey(Cave)
#expedition = models.ForeignKey(Expedition)
number = models.IntegerField(help_text="this is the sequential number in the year", )
GRADE_CHOICES=(
('A', 'A: Large obvious lead'),
('B', 'B: Average lead'),
('C', 'C: Tight unpromising lead'),
('D', 'D: Dig'),
('X', 'X: Unclimbable aven')
)
grade = models.CharField(max_length=1, choices=GRADE_CHOICES)
location_description = models.TextField(blank=True)
#should be a foreignkey to surveystation
nearest_station_description = models.CharField(max_length=400,null=True,blank=True)
nearest_station = models.CharField(max_length=200,blank=True,null=True)
area = models.CharField(max_length=100,blank=True,null=True)
completion_description = models.TextField(blank=True,null=True)
comment=models.TextField(blank=True,null=True)
def __unicode__(self):
return u"%s %s" % (self.code(), self.grade)
def code(self):
return u"%s-%s-%s" % (unicode(self.found_by.cave)[6:], self.found_by.date.year, self.number)
def get_absolute_url(self):
#return settings.URL_ROOT + '/cave/' + self.found_by.cave.kataster_number + '/' + str(self.found_by.date.year) + '-' + '%02d' %self.number
return urlparse.urljoin(settings.URL_ROOT, reverse('qm',kwargs={'cave_id':self.found_by.cave.kataster_number,'year':self.found_by.date.year,'qm_id':self.number,'grade':self.grade}))
def get_next_by_id(self):
return QM.objects.get(id=self.id+1)
def get_previous_by_id(self):
return QM.objects.get(id=self.id-1)
def wiki_link(self):
return u"%s%s%s" % ('[[QM:',self.code(),']]')
photoFileStorage = FileSystemStorage(location=settings.PHOTOS_ROOT, base_url=settings.PHOTOS_URL)
class DPhoto(TroggleImageModel):
caption = models.CharField(max_length=1000,blank=True,null=True)
contains_logbookentry = models.ForeignKey(LogbookEntry,blank=True,null=True)
contains_person = models.ManyToManyField(Person,blank=True,null=True)
file = models.ImageField(storage=photoFileStorage, upload_to='.',)
is_mugshot = models.BooleanField(default=False)
contains_cave = models.ForeignKey(Cave,blank=True,null=True)
contains_entrance = models.ForeignKey(Entrance, related_name="photo_file",blank=True,null=True)
#nearest_survey_point = models.ForeignKey(SurveyStation,blank=True,null=True)
nearest_QM = models.ForeignKey(QM,blank=True,null=True)
lon_utm = models.FloatField(blank=True,null=True)
lat_utm = models.FloatField(blank=True,null=True)
class IKOptions:
spec_module = 'core.imagekit_specs'
cache_dir = 'thumbs'
image_field = 'file'
#content_type = models.ForeignKey(ContentType)
#object_id = models.PositiveIntegerField()
#location = generic.GenericForeignKey('content_type', 'object_id')
def __unicode__(self):
return self.caption
scansFileStorage = FileSystemStorage(location=settings.SURVEY_SCANS, base_url=settings.SURVEYS_URL)
def get_scan_path(instance, filename):
year=instance.survey.expedition.year
#print("WN: ", type(instance.survey.wallet_number), instance.survey.wallet_number, instance.survey.wallet_letter)
number=str(instance.survey.wallet_number)
if str(instance.survey.wallet_letter) != "None":
number=str(instance.survey.wallet_letter) + number #concatenate as strings because the convention is 2009#01 or 2009#X01
return os.path.join('./',year,year+r'#'+number,str(instance.contents)+str(instance.number_in_wallet)+r'.jpg')
class ScannedImage(TroggleImageModel):
file = models.ImageField(storage=scansFileStorage, upload_to=get_scan_path)
scanned_by = models.ForeignKey(Person,blank=True, null=True)
scanned_on = models.DateField(null=True)
survey = models.ForeignKey('Survey')
contents = models.CharField(max_length=20,choices=(('notes','notes'),('plan','plan_sketch'),('elevation','elevation_sketch')))
number_in_wallet = models.IntegerField(null=True)
lon_utm = models.FloatField(blank=True,null=True)
lat_utm = models.FloatField(blank=True,null=True)
class IKOptions:
spec_module = 'core.imagekit_specs'
cache_dir = 'thumbs'
image_field = 'file'
#content_type = models.ForeignKey(ContentType)
#object_id = models.PositiveIntegerField()
#location = generic.GenericForeignKey('content_type', 'object_id')
#This is an ugly hack to deal with the #s in our survey scan paths. The correct thing is to write a custom file storage backend which calls urlencode on the name for making file.url but not file.path.
def correctURL(self):
return string.replace(self.file.url,r'#',r'%23')
def __unicode__(self):
return get_scan_path(self,'')
class Survey(TroggleModel):
expedition = models.ForeignKey('Expedition') #REDUNDANT (logbook_entry)
wallet_number = models.IntegerField(blank=True,null=True)
wallet_letter = models.CharField(max_length=1,blank=True,null=True)
comments = models.TextField(blank=True,null=True)
location = models.CharField(max_length=400,blank=True,null=True) #REDUNDANT
subcave = models.ForeignKey('NewSubCave', blank=True, null=True)
#notes_scan = models.ForeignKey('ScannedImage',related_name='notes_scan',blank=True, null=True) #Replaced by contents field of ScannedImage model
survex_block = models.OneToOneField('SurvexBlock',blank=True, null=True)
logbook_entry = models.ForeignKey('LogbookEntry')
centreline_printed_on = models.DateField(blank=True, null=True)
centreline_printed_by = models.ForeignKey('Person',related_name='centreline_printed_by',blank=True,null=True)
#sketch_scan = models.ForeignKey(ScannedImage,blank=True, null=True) #Replaced by contents field of ScannedImage model
tunnel_file = models.FileField(upload_to='surveyXMLfiles',blank=True, null=True)
tunnel_main_sketch = models.ForeignKey('Survey',blank=True,null=True)
integrated_into_main_sketch_on = models.DateField(blank=True,null=True)
integrated_into_main_sketch_by = models.ForeignKey('Person' ,related_name='integrated_into_main_sketch_by', blank=True,null=True)
rendered_image = models.ImageField(upload_to='renderedSurveys',blank=True,null=True)
def __unicode__(self):
return self.expedition.year+"#"+"%02d" % int(self.wallet_number)
def notes(self):
return self.scannedimage_set.filter(contents='notes')
def plans(self):
return self.scannedimage_set.filter(contents='plan')
def elevations(self):
return self.scannedimage_set.filter(contents='elevation')

View File

@@ -9,7 +9,7 @@ from django.core.urlresolvers import reverse
###########################################################
# These will allow browsing and editing of the survex data
###########################################################
# Needs to add:
# Equates
# reloading
@@ -18,29 +18,37 @@ class SurvexDirectory(models.Model):
cave = models.ForeignKey('Cave', blank=True, null=True)
primarysurvexfile = models.ForeignKey('SurvexFile', related_name='primarysurvexfile', blank=True, null=True)
# could also include files in directory but not referenced
+ def __unicode__(self):
+ return self.path
class Meta:
- ordering = ('id',)
+ ordering = ('path',)
class SurvexFile(models.Model):
path = models.CharField(max_length=200)
survexdirectory = models.ForeignKey("SurvexDirectory", blank=True, null=True)
cave = models.ForeignKey('Cave', blank=True, null=True)
class Meta:
ordering = ('id',)
+ def __unicode__(self):
+ return self.path + '.svx' or 'no file'
def exists(self):
fname = os.path.join(settings.SURVEX_DATA, self.path + ".svx")
return os.path.isfile(fname)
def OpenFile(self):
fname = os.path.join(settings.SURVEX_DATA, self.path + ".svx")
return open(fname)
def SetDirectory(self):
dirpath = os.path.split(self.path)[0]
survexdirectorylist = SurvexDirectory.objects.filter(cave=self.cave, path=dirpath)
+ # if self.cave is '' or self.cave is None:
+ # print('No cave set for survex dir %s' % self.path)
if survexdirectorylist:
self.survexdirectory = survexdirectorylist[0]
else:
@@ -59,14 +67,20 @@ class SurvexStationLookUpManager(models.Manager):
name__iexact = stationname)
class SurvexStation(models.Model):
name = models.CharField(max_length=100)
block = models.ForeignKey('SurvexBlock')
equate = models.ForeignKey('SurvexEquate', blank=True, null=True)
objects = SurvexStationLookUpManager()
x = models.FloatField(blank=True, null=True)
y = models.FloatField(blank=True, null=True)
z = models.FloatField(blank=True, null=True)
+ def __unicode__(self):
+ if self.block.cave:
+ # If we haven't got a cave we can't have a slug, saves a nonetype return
+ return self.block.cave.slug() + '/' + self.block.name + '/' + self.name or 'No station name'
+ else:
+ return str(self.block.cave) + '/' + self.block.name + '/' + self.name or 'No station name'
def path(self):
r = self.name
b = self.block
@@ -89,15 +103,15 @@ class SurvexLeg(models.Model):
#
# Single SurvexBlock
#
class SurvexBlockLookUpManager(models.Manager):
def lookup(self, name):
if name == "":
blocknames = []
else:
blocknames = name.split(".")
- block = SurvexBlock.objects.get(parent=None, survexfile__path="all")
+ block = SurvexBlock.objects.get(parent=None, survexfile__path=settings.SURVEX_TOPNAME)
for blockname in blocknames:
block = SurvexBlock.objects.get(parent=block, name__iexact=blockname)
return block
@@ -108,20 +122,20 @@ class SurvexBlock(models.Model):
parent = models.ForeignKey('SurvexBlock', blank=True, null=True)
text = models.TextField()
cave = models.ForeignKey('Cave', blank=True, null=True)
- date = models.DateField(blank=True, null=True)
+ date = models.DateTimeField(blank=True, null=True)
expeditionday = models.ForeignKey("ExpeditionDay", null=True)
expedition = models.ForeignKey('Expedition', blank=True, null=True)
survexfile = models.ForeignKey("SurvexFile", blank=True, null=True)
begin_char = models.IntegerField() # code for where in the survex data files this block sits
survexpath = models.CharField(max_length=200) # the path for the survex stations
survexscansfolder = models.ForeignKey("SurvexScansFolder", null=True)
#refscandir = models.CharField(max_length=100)
totalleglength = models.FloatField()
class Meta:
ordering = ('id',)
@@ -130,7 +144,7 @@ class SurvexBlock(models.Model):
def __unicode__(self):
return self.name and unicode(self.name) or 'no name'
def GetPersonroles(self):
res = [ ]
for personrole in self.personrole_set.order_by('personexpedition'):
@@ -149,10 +163,10 @@ class SurvexBlock(models.Model):
ss = SurvexStation(name=name, block=self)
ss.save()
return ss
def DayIndex(self):
return list(self.expeditionday.survexblock_set.all()).index(self)
class SurvexTitle(models.Model):
survexblock = models.ForeignKey('SurvexBlock')
@@ -177,39 +191,45 @@ ROLE_CHOICES = (
class SurvexPersonRole(models.Model):
survexblock = models.ForeignKey('SurvexBlock')
nrole = models.CharField(choices=ROLE_CHOICES, max_length=200, blank=True, null=True)
# increasing levels of precision
personname = models.CharField(max_length=100)
person = models.ForeignKey('Person', blank=True, null=True)
personexpedition = models.ForeignKey('PersonExpedition', blank=True, null=True)
persontrip = models.ForeignKey('PersonTrip', blank=True, null=True)
expeditionday = models.ForeignKey("ExpeditionDay", null=True)
def __unicode__(self):
return unicode(self.person) + " - " + unicode(self.survexblock) + " - " + unicode(self.nrole)
class SurvexScansFolder(models.Model):
fpath = models.CharField(max_length=200)
walletname = models.CharField(max_length=200)
class Meta:
ordering = ('walletname',)
+ def __unicode__(self):
+ return self.walletname or 'no wallet'
def get_absolute_url(self):
return urlparse.urljoin(settings.URL_ROOT, reverse('surveyscansfolder', kwargs={"path":re.sub("#", "%23", self.walletname)}))
class SurvexScanSingle(models.Model):
ffile = models.CharField(max_length=200)
name = models.CharField(max_length=200)
survexscansfolder = models.ForeignKey("SurvexScansFolder", null=True)
class Meta:
ordering = ('name',)
+ def __unicode__(self):
+ return self.survexscansfolder.walletname + '/' + self.name
def get_absolute_url(self):
return urlparse.urljoin(settings.URL_ROOT, reverse('surveyscansingle', kwargs={"path":re.sub("#", "%23", self.survexscansfolder.walletname), "file":self.name}))
class TunnelFile(models.Model):
tunnelpath = models.CharField(max_length=200)
tunnelname = models.CharField(max_length=200)
@@ -221,8 +241,8 @@ class TunnelFile(models.Model):
filesize = models.IntegerField(default=0)
npaths = models.IntegerField(default=0)
survextitles = models.ManyToManyField("SurvexTitle")
class Meta:
ordering = ('tunnelpath',)

View File

@@ -47,6 +47,6 @@ def survex_to_html(value, autoescape=None):
if autoescape:
value = conditional_escape(value)
for regex, sub in regexes:
- print sub
+ print(sub)
value = regex.sub(sub, value)
return mark_safe(value)

View File

@@ -7,7 +7,6 @@ from troggle.core.models import QM, DPhoto, LogbookEntry, Cave
import re, urlparse
register = template.Library()
@register.filter()
def plusone(n):
@@ -77,7 +76,7 @@ def wiki_to_html_short(value, autoescape=None):
if number>1:
return '<h'+num+'>'+matchobj.groups()[1]+'</h'+num+'>'
else:
- print 'morethanone'
+ print('morethanone')
return matchobj.group()
value = re.sub(r"(?m)^(=+)([^=]+)(=+)$",headerrepl,value)
@@ -143,13 +142,13 @@ def wiki_to_html_short(value, autoescape=None):
value = re.sub(photoSrcPattern,photoSrcRepl, value, re.DOTALL) value = re.sub(photoSrcPattern,photoSrcRepl, value, re.DOTALL)
#make cave links #make cave links
value = re.sub("\[\[\s*cave:([^\s]+)\s*\s*\]\]", r'<a href="%scave/\1/">\1</a>' % settings.URL_ROOT, value, re.DOTALL) value = re.sub(r"\[\[\s*cave:([^\s]+)\s*\s*\]\]", r'<a href="%scave/\1/">\1</a>' % settings.URL_ROOT, value, re.DOTALL)
#make people links #make people links
value = re.sub("\[\[\s*person:(.+)\|(.+)\]\]",r'<a href="%sperson/\1/">\2</a>' % settings.URL_ROOT, value, re.DOTALL) value = re.sub(r"\[\[\s*person:(.+)\|(.+)\]\]",r'<a href="%sperson/\1/">\2</a>' % settings.URL_ROOT, value, re.DOTALL)
#make subcave links #make subcave links
value = re.sub("\[\[\s*subcave:(.+)\|(.+)\]\]",r'<a href="%ssubcave/\1/">\2</a>' % settings.URL_ROOT, value, re.DOTALL) value = re.sub(r"\[\[\s*subcave:(.+)\|(.+)\]\]",r'<a href="%ssubcave/\1/">\2</a>' % settings.URL_ROOT, value, re.DOTALL)
#make cavedescription links #make cavedescription links
value = re.sub("\[\[\s*cavedescription:(.+)\|(.+)\]\]",r'<a href="%scavedescription/\1/">\2</a>' % settings.URL_ROOT, value, re.DOTALL) value = re.sub(r"\[\[\s*cavedescription:(.+)\|(.+)\]\]",r'<a href="%scavedescription/\1/">\2</a>' % settings.URL_ROOT, value, re.DOTALL)
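The substantive change in the hunk above is switching the wiki-link patterns to raw strings, so escapes such as \[ and \s reach the regex engine untouched rather than being interpreted (or warned about) by Python. A hedged illustration of the same cave-link substitution, with made-up logbook text and URL_ROOT:

    import re

    URL_ROOT = "/"   # hypothetical value of settings.URL_ROOT
    text = "Dug the boulder choke in [[cave:204]] after lunch."
    html = re.sub(r"\[\[\s*cave:([^\s]+)\s*\s*\]\]",
                  r'<a href="%scave/\1/">\1</a>' % URL_ROOT, text)
    # html == 'Dug the boulder choke in <a href="/cave/204/">204</a> after lunch.'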

View File

@@ -1,6 +1,6 @@
from django.conf import settings
import fileAbstraction
-from django.shortcuts import render_to_response
+from django.shortcuts import render
from django.http import HttpResponse, Http404
import os, stat
import re
@@ -8,7 +8,7 @@ from troggle.core.models import SurvexScansFolder, SurvexScanSingle, SurvexBlock
import parsers.surveys
import urllib

# inline fileabstraction into here if it's not going to be useful anywhere else
# keep things simple and ignore exceptions everywhere for now
@@ -33,7 +33,7 @@ def upload(request, path):
def download(request, path):
    #try:
    return HttpResponse(fileAbstraction.readFile(path), content_type=getMimeType(path.split(".")[-1]))
    #except:
    #    raise Http404
@@ -49,32 +49,32 @@ extmimetypes = {".txt": "text/plain",
                ".jpg": "image/jpeg",
                ".jpeg": "image/jpeg",
               }

# dead
def jgtfile(request, f):
    fp = os.path.join(settings.SURVEYS, f)
    # could also surf through SURVEX_DATA
    # directory listing
    if os.path.isdir(fp):
        listdirfiles = [ ]
        listdirdirs = [ ]
        for lf in sorted(os.listdir(fp)):
            hpath = os.path.join(f, lf)  # not absolute path
            if lf[0] == "." or lf[-1] == "~":
                continue
            hpath = hpath.replace("\\", "/")  # for windows users
            href = hpath.replace("#", "%23")  # '#' in file name annoyance
            flf = os.path.join(fp, lf)
            if os.path.isdir(flf):
                nfiles = len([sf for sf in os.listdir(flf) if sf[0] != "."])
                listdirdirs.append((href, hpath + "/", nfiles))
            else:
                listdirfiles.append((href, hpath, os.path.getsize(flf)))
        upperdirs = [ ]
        lf = f
        while lf:
@@ -85,9 +85,9 @@ def jgtfile(request, f):
            lf = os.path.split(lf)[0]
            upperdirs.append((href, hpath))
        upperdirs.append(("", "/"))
-        return render_to_response('listdir.html', {'file':f, 'listdirfiles':listdirfiles, 'listdirdirs':listdirdirs, 'upperdirs':upperdirs, 'settings': settings})
+        return render(request, 'listdir.html', {'file':f, 'listdirfiles':listdirfiles, 'listdirdirs':listdirdirs, 'upperdirs':upperdirs, 'settings': settings})
    # flat output of file when loaded
    if os.path.isfile(fp):
        ext = os.path.splitext(fp)[1].lower()
@@ -123,16 +123,16 @@ def SaveImageInDir(name, imgdir, project, fdata, bbinary):
        print "*** Making directory", fprojdir
        os.path.mkdir(fprojdir)
    print "hhh"
    fname = os.path.join(fprojdir, name)
    print fname, "fff"
    fname = UniqueFile(fname)

    p2, p1 = os.path.split(fname)
    p3, p2 = os.path.split(p2)
    p4, p3 = os.path.split(p3)
    res = os.path.join(p3, p2, p1)

    print "saving file", fname
    fout = open(fname, (bbinary and "wb" or "w"))
    fout.write(fdata.read())
@@ -163,73 +163,73 @@ def jgtuploadfile(request):
    #print ("FFF", request.FILES.values())
    message = ""
    print "gothere"
-    return render_to_response('fileupload.html', {'message':message, 'filesuploaded':filesuploaded, 'settings': settings})
+    return render(request, 'fileupload.html', {'message':message, 'filesuploaded':filesuploaded, 'settings': settings})

def surveyscansfolder(request, path):
    #print [ s.walletname for s in SurvexScansFolder.objects.all() ]
    survexscansfolder = SurvexScansFolder.objects.get(walletname=urllib.unquote(path))
-    return render_to_response('survexscansfolder.html', { 'survexscansfolder':survexscansfolder, 'settings': settings })
+    return render(request, 'survexscansfolder.html', { 'survexscansfolder':survexscansfolder, 'settings': settings })

def surveyscansingle(request, path, file):
    survexscansfolder = SurvexScansFolder.objects.get(walletname=urllib.unquote(path))
    survexscansingle = SurvexScanSingle.objects.get(survexscansfolder=survexscansfolder, name=file)
    return HttpResponse(content=open(survexscansingle.ffile), content_type=getMimeType(path.split(".")[-1]))
-    #return render_to_response('survexscansfolder.html', { 'survexscansfolder':survexscansfolder, 'settings': settings })
+    #return render(request, 'survexscansfolder.html', { 'survexscansfolder':survexscansfolder, 'settings': settings })

def surveyscansfolders(request):
    survexscansfolders = SurvexScansFolder.objects.all()
-    return render_to_response('survexscansfolders.html', { 'survexscansfolders':survexscansfolders, 'settings': settings })
+    return render(request, 'survexscansfolders.html', { 'survexscansfolders':survexscansfolders, 'settings': settings })

def tunneldata(request):
    tunnelfiles = TunnelFile.objects.all()
-    return render_to_response('tunnelfiles.html', { 'tunnelfiles':tunnelfiles, 'settings': settings })
+    return render(request, 'tunnelfiles.html', { 'tunnelfiles':tunnelfiles, 'settings': settings })

def tunnelfile(request, path):
    tunnelfile = TunnelFile.objects.get(tunnelpath=urllib.unquote(path))
    tfile = os.path.join(settings.TUNNEL_DATA, tunnelfile.tunnelpath)
    return HttpResponse(content=open(tfile), content_type="text/plain")

def tunnelfileupload(request, path):
    tunnelfile = TunnelFile.objects.get(tunnelpath=urllib.unquote(path))
    tfile = os.path.join(settings.TUNNEL_DATA, tunnelfile.tunnelpath)
    project, user, password, tunnelversion = request.POST["tunnelproject"], request.POST["tunneluser"], request.POST["tunnelpassword"], request.POST["tunnelversion"]
    print (project, user, tunnelversion)

    assert len(request.FILES.values()) == 1, "only one file to upload"
    uploadedfile = request.FILES.values()[0]

    if uploadedfile.field_name != "sketch":
        return HttpResponse(content="Error: non-sketch file uploaded", content_type="text/plain")
    if uploadedfile.content_type != "text/plain":
        return HttpResponse(content="Error: non-plain content type", content_type="text/plain")

    # could use this to add new files
    if os.path.split(path)[1] != uploadedfile.name:
        return HttpResponse(content="Error: name disagrees", content_type="text/plain")

    orgsize = tunnelfile.filesize  # = os.stat(tfile)[stat.ST_SIZE]
    ttext = uploadedfile.read()

    # could check that the user and projects agree here
    fout = open(tfile, "w")
    fout.write(ttext)
    fout.close()

    # redo its settings of
    parsers.surveys.SetTunnelfileInfo(tunnelfile)
    tunnelfile.save()

    uploadedfile.close()
    message = "File size %d overwritten with size %d" % (orgsize, tunnelfile.filesize)
    return HttpResponse(content=message, content_type="text/plain")
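Most of the view changes in this file follow a single pattern: render_to_response(template, context) becomes render(request, template, context), which Django 1.10 expects and which runs context processors against the actual request. A minimal hedged sketch of the before/after shape (view and context names are illustrative):

    from django.shortcuts import render

    def example_view(request):                    # hypothetical view
        context = {'tunnelfiles': [], 'settings': None}
        # old style used elsewhere in this history:
        #   return render_to_response('tunnelfiles.html', context)
        # new style, taking the request explicitly:
        return render(request, 'tunnelfiles.html', context)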

View File

@@ -10,45 +10,16 @@ from troggle.helper import login_required_if_public
from django.forms.models import modelformset_factory
from django import forms
from django.core.urlresolvers import reverse
from utils import render_with_context # see views_logbooks for explanation on this.
from django.http import HttpResponse, HttpResponseRedirect
from django.conf import settings
import re, urlparse
-from django.shortcuts import get_object_or_404
+from django.shortcuts import get_object_or_404, render
import settings
from PIL import Image, ImageDraw, ImageFont
import string, os, sys, subprocess

#
# NEW CONTENT
#
from troggle.core.models import CaveM, Cave_descriptionM, ExpeditionM

def millenialcaves(request):
    #RW messing around area
    caves = CaveM.objects.all()
    descr = Cave_descriptionM.objects.all()
    return render_with_context(request,'millenialcaves.html',{'caves': caves,'descriptions' : descr})

def millenialdescription(request, slug):
    desc = Cave_descriptionM.objects.get(slug=slug)
    return render_with_context(request,'cave_uground_description.html', {'cave': desc})

def millenialpeople(request):
    expos = ExpeditionM.objects.all()
    return render_with_context(request,'peoplemillenial.html' , {'expos': expos})
#
# END NEW CONTENT
#

def getCave(cave_id):
    """Returns a cave object when given a cave name or number. It is used by views including cavehref, ent, and qm."""
    try:
@@ -58,7 +29,7 @@ def getCave(cave_id):
    return cave

def pad5(x):
    return "0" * (5 -len(x.group(0))) + x.group(0)
def padnumber(x):
    return re.sub("\d+", pad5, x)
def numericalcmp(x, y):
@@ -66,7 +37,7 @@ def numericalcmp(x, y):
def caveCmp(x, y):
    if x.kataster_number:
        if y.kataster_number:
            return numericalcmp(x.kataster_number, y.kataster_number) # Note that cave kataster numbers are not generally integers.
@@ -75,23 +46,22 @@ def caveCmp(x, y):
    else:
        if y.kataster_number:
            return 1
        else:
            return numericalcmp(x.unofficial_number, y.unofficial_number)

def caveindex(request):
-    caves = Cave.objects.all()
+    #caves = Cave.objects.all()
    notablecavehrefs = settings.NOTABLECAVESHREFS
    notablecaves = [Cave.objects.get(kataster_number=kataster_number) for kataster_number in notablecavehrefs ]
-    #caves1623 = list(Cave.objects.filter(area__short_name = "1623"))
+    caves1623 = list(Cave.objects.filter(area__short_name = "1623"))
    caves1623 = list(Cave.objects.all())
    caves1626 = list(Cave.objects.filter(area__short_name = "1626"))
    caves1623.sort(caveCmp)
    caves1626.sort(caveCmp)
-    return render_with_context(request,'caveindex.html', {'caves1623': caves1623, 'caves1626': caves1626, 'notablecaves':notablecaves, 'cavepage': True})
+    return render(request,'caveindex.html', {'caves1623': caves1623, 'caves1626': caves1626, 'notablecaves':notablecaves, 'cavepage': True})

def millenialcaves(request):
    #RW messing around area
    return HttpResponse("Test text", content_type="text/plain")

def cave3d(request, cave_id=''):
@@ -111,43 +81,44 @@ def cave3d(request, cave_id=''):
def cave(request, cave_id='', offical_name=''):
    cave=getCave(cave_id)
    if cave.non_public and settings.PUBLIC_SITE and not request.user.is_authenticated():
-        return render_with_context(request,'nonpublic.html', {'instance': cave, 'cavepage': True, 'cave_id': cave_id})
+        return render(request,'nonpublic.html', {'instance': cave, 'cavepage': True, 'cave_id': cave_id})
    else:
-        return render_with_context(request,'cave.html', {'settings': settings, 'cave': cave, 'cavepage': True, 'cave_id': cave_id})
+        return render(request,'cave.html', {'settings': settings, 'cave': cave, 'cavepage': True, 'cave_id': cave_id})

def caveEntrance(request, slug):
    cave = Cave.objects.get(caveslug__slug = slug)
    if cave.non_public and settings.PUBLIC_SITE and not request.user.is_authenticated():
-        return render_with_context(request,'nonpublic.html', {'instance': cave})
+        return render(request,'nonpublic.html', {'instance': cave})
    else:
-        return render_with_context(request,'cave_entrances.html', {'cave': cave})
+        return render(request,'cave_entrances.html', {'cave': cave})

def caveDescription(request, slug):
    cave = Cave.objects.get(caveslug__slug = slug)
    if cave.non_public and settings.PUBLIC_SITE and not request.user.is_authenticated():
-        return render_with_context(request,'nonpublic.html', {'instance': cave})
+        return render(request,'nonpublic.html', {'instance': cave})
    else:
-        return render_with_context(request,'cave_uground_description.html', {'cave': cave})
+        return render(request,'cave_uground_description.html', {'cave': cave})

def caveQMs(request, slug):
    cave = Cave.objects.get(caveslug__slug = slug)
    if cave.non_public and settings.PUBLIC_SITE and not request.user.is_authenticated():
-        return render_with_context(request,'nonpublic.html', {'instance': cave})
+        return render(request,'nonpublic.html', {'instance': cave})
    else:
-        return render_with_context(request,'cave_qms.html', {'cave': cave})
+        return render(request,'cave_qms.html', {'cave': cave})

def caveLogbook(request, slug):
    cave = Cave.objects.get(caveslug__slug = slug)
    if cave.non_public and settings.PUBLIC_SITE and not request.user.is_authenticated():
-        return render_with_context(request,'nonpublic.html', {'instance': cave})
+        return render(request,'nonpublic.html', {'instance': cave})
    else:
-        return render_with_context(request,'cave_logbook.html', {'cave': cave})
+        return render(request,'cave_logbook.html', {'cave': cave})

def caveSlug(request, slug):
    cave = Cave.objects.get(caveslug__slug = slug)
    if cave.non_public and settings.PUBLIC_SITE and not request.user.is_authenticated():
-        return render_with_context(request,'nonpublic.html', {'instance': cave, 'cave_editable': slug})
+        return render(request,'nonpublic.html', {'instance': cave, 'cave_editable': slug})
    else:
-        return render_with_context(request,'cave.html', {'cave': cave, 'cave_editable': slug})
+        return render(request,'cave.html', {'cave': cave, 'cave_editable': slug})

@login_required_if_public
def edit_cave(request, slug=None):
@@ -182,14 +153,14 @@ def edit_cave(request, slug=None):
                ceinst.cave = cave
                ceinst.save()
            cave.writeDataFile()
            return HttpResponseRedirect("/" + cave.url)
    else:
        form = CaveForm(instance=cave)
        ceFormSet = CaveAndEntranceFormSet(queryset=cave.caveandentrance_set.all())
        versionControlForm = VersionControlCommentForm()
-    return render_with_context(request,
+    return render(request,
                  'editcave2.html',
                  {'form': form,
                   'caveAndEntranceFormSet': ceFormSet,
                   'versionControlForm': versionControlForm
@@ -197,7 +168,7 @@ def edit_cave(request, slug=None):
@login_required_if_public
def editEntrance(request, caveslug, slug=None):
    cave = Cave.objects.get(caveslug__slug = caveslug)
    if slug is not None:
        entrance = Entrance.objects.get(entranceslug__slug = slug)
    else:
@@ -224,7 +195,7 @@ def editEntrance(request, caveslug, slug=None):
            el.entrance = entrance
            el.save()
        entrance.writeDataFile()
        return HttpResponseRedirect("/" + cave.url)
    else:
        form = EntranceForm(instance = entrance)
        versionControlForm = VersionControlCommentForm()
@@ -232,104 +203,102 @@
        entletter = EntranceLetterForm(request.POST)
    else:
        entletter = None
-    return render_with_context(request,
+    return render(request,
                  'editentrance.html',
                  {'form': form,
                   'versionControlForm': versionControlForm,
                   'entletter': entletter
                  })

-def qm(request,cave_id,qm_id,year,grade=None):
+def qm(request,qm_id):
    year=int(year)
    try:
-        qm=getCave(cave_id).get_QMs().get(number=qm_id,found_by__date__year=year)
-        return render_with_context(request,'qm.html',locals())
+        qm=QM.objects.get(id=qm_id)
+        return render(request,'qm.html',locals())
    except QM.DoesNotExist:
        url=urlparse.urljoin(settings.URL_ROOT, r'/admin/core/qm/add/'+'?'+ r'number=' + qm_id)
        if grade:
            url += r'&grade=' + grade
        return HttpResponseRedirect(url)

def ent(request, cave_id, ent_letter):
    cave = Cave.objects.filter(kataster_number = cave_id)[0]
    cave_and_ent = CaveAndEntrance.objects.filter(cave = cave).filter(entrance_letter = ent_letter)[0]
-    return render_with_context(request,'entrance.html', {'cave': cave,
+    return render(request,'entrance.html', {'cave': cave,
                                            'entrance': cave_and_ent.entrance,
                                            'letter': cave_and_ent.entrance_letter,})

def entranceSlug(request, slug):
    entrance = Entrance.objects.get(entranceslug__slug = slug)
    if entrance.non_public and not request.user.is_authenticated():
-        return render_with_context(request,'nonpublic.html', {'instance': entrance})
+        return render(request,'nonpublic.html', {'instance': entrance})
    else:
-        return render_with_context(request,'entranceslug.html', {'entrance': entrance})
+        return render(request,'entranceslug.html', {'entrance': entrance})

def survexblock(request, survexpath):
    survexpath = re.sub("/", ".", survexpath)
-    print "jjjjjj", survexpath
+    print("jjjjjj", survexpath)
    survexblock = models.SurvexBlock.objects.get(survexpath=survexpath)
    #ftext = survexblock.filecontents()
    ftext = survexblock.text
-    return render_with_context(request,'survexblock.html', {'survexblock':survexblock, 'ftext':ftext, })
+    return render(request,'survexblock.html', {'survexblock':survexblock, 'ftext':ftext, })

def surveyindex(request):
    surveys=Survey.objects.all()
    expeditions=Expedition.objects.order_by("-year")
-    return render_with_context(request,'survey.html',locals())
+    return render(request,'survey.html',locals())

def survey(request,year,wallet_number):
    surveys=Survey.objects.all()
    expeditions=Expedition.objects.order_by("-year")
    current_expedition=Expedition.objects.filter(year=year)[0]
    if wallet_number!='':
        current_survey=Survey.objects.filter(expedition=current_expedition,wallet_number=wallet_number)[0]
        notes=current_survey.scannedimage_set.filter(contents='notes')
        planSketches=current_survey.scannedimage_set.filter(contents='plan')
        elevationSketches=current_survey.scannedimage_set.filter(contents='elevation')
-    return render_with_context(request,'survey.html', locals())
+    return render(request,'survey.html', locals())

def cave_description(request, cavedescription_name):
    cave_description = get_object_or_404(CaveDescription, short_name = cavedescription_name)
-    return render_with_context(request,'cave_description.html', locals())
+    return render(request,'cave_description.html', locals())

def get_entrances(request, caveslug):
    cave = Cave.objects.get(caveslug__slug = caveslug)
-    return render_with_context(request,'options.html', {"items": [(e.entrance.slug(), e.entrance.slug()) for e in cave.entrances()]})
+    return render(request,'options.html', {"items": [(e.entrance.slug(), e.entrance.slug()) for e in cave.entrances()]})

def get_qms(request, caveslug):
    cave = Cave.objects.get(caveslug__slug = caveslug)
-    return render_with_context(request,'options.html', {"items": [(e.entrance.slug(), e.entrance.slug()) for e in cave.entrances()]})
+    return render(request,'options.html', {"items": [(e.entrance.slug(), e.entrance.slug()) for e in cave.entrances()]})

areanames = [
    #('', 'Location unclear'),
    ('1a', '1a &ndash; Plateau: around Top Camp'),
    ('1b', '1b &ndash; Western plateau near 182'),
    ('1c', '1c &ndash; Eastern plateau near 204 walk-in path'),
    ('1d', '1d &ndash; Further plateau around 76'),
    ('2a', '2a &ndash; Southern Schwarzmooskogel near 201 path and the Nipple'),
    ('2b', '2b &ndash; Eish&ouml;hle area'),
    ('2b or 4 (unclear)', '2b or 4 (unclear)'),
    ('2c', '2c &ndash; Kaninchenh&ouml;hle area'),
    ('2d', '2d &ndash; Steinbr&uuml;ckenh&ouml;hle area'),
    ('3', '3 &ndash; Br&auml;uning Alm'),
    ('4', '4 &ndash; Kratzer valley'),
    ('5', '5 &ndash; Schwarzmoos-Wildensee'),
    ('6', '6 &ndash; Far plateau'),
    ('1626 or 6 (borderline)', '1626 or 6 (borderline)'),
    ('7', '7 &ndash; Egglgrube'),
    ('8a', '8a &ndash; Loser south face'),
    ('8b', '8b &ndash; Loser below Dimmelwand'),
    ('8c', '8c &ndash; Augst See'),
    ('8d', '8d &ndash; Loser-Hochganger ridge'),
    ('9', '9 &ndash; Gschwandt Alm'),
    ('10', '10 &ndash; Altaussee'),
    ('11', '11 &ndash; Augstbach')
]

def prospecting(request):
@@ -341,27 +310,27 @@ def prospecting(request):
        caves = list(a.cave_set.all())
        caves.sort(caveCmp)
        areas.append((name, a, caves))
-    return render_with_context(request, 'prospecting.html', {"areas": areas})
+    return render(request, 'prospecting.html', {"areas": areas})

# Parameters for big map and zoomed subarea maps:
# big map first (zoom factor ignored)
maps = {
    # id       left      top       right     bottom    zoom
    #          G&K       G&K       G&K       G&K       factor
    "all":   [33810.4, 85436.5, 38192.0, 81048.2, 0.35,
              "All"],
    "40":    [36275.6, 82392.5, 36780.3, 81800.0, 3.0,
              "Eish&ouml;hle"],
    "76":    [35440.0, 83220.0, 36090.0, 82670.0, 1.3,
              "Eislufth&ouml;hle"],
    "204":   [36354.1, 84154.5, 37047.4, 83300, 3.0,
              "Steinbr&uuml;ckenh&ouml;hle"],
    "tc":    [35230.0, 82690.0, 36110.0, 82100.0, 3.0,
              "Near Top Camp"],
    "grieß":
             [36000.0, 86300.0, 38320.0, 84400.0, 4.0,
              "Grießkogel Area"],
    }

for n in maps.keys():
@@ -372,7 +341,7 @@ for n in maps.keys():
        for j in range(2):
            maps["%s%i%i" % (n, i, j)] = [L + i * W, T - j * H, L + (i + 1) * W, T - (j + 1) * H, S, name]

# Keys in the order in which we want the maps output
mapcodes = ["all", "grieß","40", "76", "204", "tc"]

# Field codes
L = 0
T = 1
@@ -382,54 +351,54 @@ ZOOM = 4
DESC = 5

areacolours = {
    '1a' : '#00ffff',
    '1b' : '#ff00ff',
    '1c' : '#ffff00',
    '1d' : '#ffffff',
    '2a' : '#ff0000',
    '2b' : '#00ff00',
    '2c' : '#008800',
    '2d' : '#ff9900',
    '3'  : '#880000',
    '4'  : '#0000ff',
    '6'  : '#000000',  # doubles for surface fixed pts, and anything else
    '7'  : '#808080'
    }

for FONT in [
    "/usr/share/fonts/truetype/freefont/FreeSans.ttf",
    "/usr/X11R6/lib/X11/fonts/truetype/arial.ttf",
    "C:\WINNT\Fonts\ARIAL.TTF"
    ]:
    if os.path.isfile(FONT): break
TEXTSIZE = 16
CIRCLESIZE =8
LINEWIDTH = 2
myFont = ImageFont.truetype(FONT, TEXTSIZE)

def mungecoord(x, y, mapcode, img):
    # Top of Zinken is 73 1201 = dataset 34542 81967
    # Top of Hinter is 1073 562 = dataset 36670 83317
    # image is 1417 by 2201
    # FACTOR1 = 1000.0 / (36670.0-34542.0)
    # FACTOR2 = (1201.0-562.0) / (83317 - 81967)
    # FACTOR = (FACTOR1 + FACTOR2)/2
    # The factors aren't the same as the scanned map's at a slight angle. I
    # can't be bothered to fix this. Since we zero on the Hinter it makes
    # very little difference for caves in the areas round 76 or 204.
    # xoffset = (x - 36670)*FACTOR
    # yoffset = (y - 83317)*FACTOR
    # return (1073 + xoffset, 562 - yoffset)
    m = maps[mapcode]
    factorX, factorY = img.size[0] / (m[R] - m[L]), img.size[1] / (m[T] - m[B])
    return ((x - m[L]) * factorX, (m[T] - y) * factorY)
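The retained body of mungecoord() is a plain linear map from G&K survey coordinates to image pixels. A hedged worked example using the "all" map bounds above and a hypothetical 1000 x 1000 pixel image:

    left, top, right, bottom = 33810.4, 85436.5, 38192.0, 81048.2   # maps["all"][L], [T], [R], [B]
    width_px = height_px = 1000.0                                   # hypothetical image size
    factor_x = width_px / (right - left)
    factor_y = height_px / (top - bottom)
    x, y = 36670.0, 83317.0          # illustrative easting/northing (the 'Top of Hinter' figures in the comment)
    px = (x - left) * factor_x       # about 652.6 px from the left edge
    py = (top - y) * factor_y        # about 483.0 px down from the top edge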
COL_TYPES = {True: "red",
             False: "#dddddd",
             "Reference": "#dddddd"}

def plot(surveypoint, number, point_type, label, mapcode, draw, img):
    try:
        ss = SurvexStation.objects.lookup(surveypoint)
@@ -451,40 +420,40 @@ def prospecting_image(request, name):
    m = maps[name]
    #imgmaps = []
    if name == "all":
        img = mainImage
    else:
        M = maps['all']
        W, H = mainImage.size
        l = int((m[L] - M[L]) / (M[R] - M[L]) * W)
        t = int((m[T] - M[T]) / (M[B] - M[T]) * H)
        r = int((m[R] - M[L]) / (M[R] - M[L]) * W)
        b = int((m[B] - M[T]) / (M[B] - M[T]) * H)
        img = mainImage.crop((l, t, r, b))
        w = int(round(m[ZOOM] * (m[R] - m[L]) / (M[R] - M[L]) * W))
        h = int(round(m[ZOOM] * (m[B] - m[T]) / (M[B] - M[T]) * H))
        img = img.resize((w, h), Image.BICUBIC)

    draw = ImageDraw.Draw(img)
    draw.setfont(myFont)

    if name == "all":
        for maparea in maps.keys():
            if maparea == "all":
                continue
            localm = maps[maparea]
            l,t = mungecoord(localm[L], localm[T], "all", img)
            r,b = mungecoord(localm[R], localm[B], "all", img)
            text = maparea + " map"
            textlen = draw.textsize(text)[0] + 3
            draw.rectangle([l, t, l+textlen, t+TEXTSIZE+2], fill='#ffffff')
            draw.text((l+2, t+1), text, fill="#000000")
            #imgmaps.append( [l, t, l+textlen, t+SIZE+2, "submap" + maparea, maparea + " subarea map"] )
            draw.line([l, t, r, t], fill='#777777', width=LINEWIDTH)
            draw.line([l, b, r, b], fill='#777777', width=LINEWIDTH)
            draw.line([l, t, l, b], fill='#777777', width=LINEWIDTH)
            draw.line([r, t, r, b], fill='#777777', width=LINEWIDTH)
            draw.line([l, t, l+textlen, t], fill='#777777', width=LINEWIDTH)
            draw.line([l, t+TEXTSIZE+2, l+textlen, t+TEXTSIZE+2], fill='#777777', width=LINEWIDTH)
            draw.line([l, t, l, t+TEXTSIZE+2], fill='#777777', width=LINEWIDTH)
            draw.line([l+textlen, t, l+textlen, t+TEXTSIZE+2], fill='#777777', width=LINEWIDTH)
            #imgmaps[maparea] = []

    # Draw scale bar
    m100 = int(100 / (m[R] - m[L]) * img.size[0])
@@ -496,7 +465,7 @@ def prospecting_image(request, name):
    plot("laser.0_7", "BNase", "Reference", "Br&auml;uning Nase laser point", name, draw, img)
    plot("226-96", "BZkn", "Reference", "Br&auml;uning Zinken trig point", name, draw, img)
    plot("vd1","VD1","Reference", "VD1 survey point", name, draw, img)
    plot("laser.kt114_96","HSK","Reference", "Hinterer Schwarzmooskogel trig point", name, draw, img)
    plot("2000","Nipple","Reference", "Nipple (Wei&szlig;e Warze)", name, draw, img)
    plot("3000","VSK","Reference", "Vorderer Schwarzmooskogel summit", name, draw, img)
    plot("topcamp", "TC", "Reference", "Top Camp", name, draw, img)
@@ -506,14 +475,15 @@ def prospecting_image(request, name):
    plot("laser.0_5", "LSR5", "Reference", "Laser Point 0/5", name, draw, img)
    plot("225-96", "BAlm", "Reference", "Br&auml;uning Alm trig point", name, draw, img)
    for entrance in Entrance.objects.all():
        station = entrance.best_station()
        if station:
            #try:
            areaName = entrance.caveandentrance_set.all()[0].cave.getArea().short_name
            plot(station, "%s-%s" % (areaName, str(entrance)[5:]), entrance.needs_surface_work(), str(entrance), name, draw, img)
            #except:
            #    pass

    for (N, E, D, num) in [(35975.37, 83018.21, 100,"177"), # Calculated from bearings
                           (35350.00, 81630.00, 50, "71"), # From Auer map
                           (36025.00, 82475.00, 50, "146"), # From mystery map
@@ -537,8 +507,8 @@ def prospecting_image(request, name):
    del draw
    img.save(response, "PNG")
    return response

STATIONS = {}
poslineregex = re.compile("^\(\s*([+-]?\d*\.\d*),\s*([+-]?\d*\.\d*),\s*([+-]?\d*\.\d*)\s*\)\s*([^\s]+)$")

def LoadPos():
    call([settings.CAVERN, "--output=%s/all.3d" % settings.SURVEX_DATA, "%s/all.svx" % settings.SURVEX_DATA])
@@ -546,7 +516,7 @@ def LoadPos():
    posfile = open("%sall.pos" % settings.SURVEX_DATA)
    posfile.readline()  # Drop header
    for line in posfile.readlines():
        r = poslineregex.match(line)
        if r:
            x, y, z, name = r.groups()
            STATIONS[name] = (x, y, z)
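LoadPos() shells out to cavern (settings.CAVERN) and then parses the generated all.pos file with poslineregex, which expects a parenthesised coordinate triple followed by the station name. A hedged sketch with an invented sample line:

    import re

    poslineregex = re.compile(r"^\(\s*([+-]?\d*\.\d*),\s*([+-]?\d*\.\d*),\s*([+-]?\d*\.\d*)\s*\)\s*([^\s]+)$")
    sample = "(36670.00, 83317.00, 1903.00 ) caves.204.entrance"   # hypothetical .pos line
    r = poslineregex.match(sample)
    if r:
        x, y, z, name = r.groups()   # all strings, exactly as stored in STATIONS
        print(name, x, y, z)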

View File

@@ -1,4 +1,4 @@
-from django.shortcuts import render_to_response
+from django.shortcuts import render_to_response, render
from troggle.core.models import Expedition, Person, PersonExpedition, PersonTrip, LogbookEntry, SurvexBlock
import troggle.core.models as models
import troggle.settings as settings
@@ -9,7 +9,6 @@ from troggle.core.forms import getTripForm#, get_name, PersonForm
from django.core.urlresolvers import reverse
from django.http import HttpResponseRedirect, HttpResponse
from django.template import Context, loader
from utils import render_with_context
import os.path
import troggle.parsers.logbooks as logbookparsers
from django.template.defaultfilters import slugify
@@ -38,7 +37,7 @@ def getNotablePersons():
    for person in Person.objects.all():
        if person.bisnotable():
            notablepersons.append(person)
    return notablepersons

def personindex(request):
@@ -49,13 +48,13 @@ def personindex(request):
    nc = (len(persons) + ncols - 1) / ncols
    for i in range(ncols):
        personss.append(persons[i * nc: (i + 1) * nc])

    notablepersons = []
    for person in Person.objects.all():
        if person.bisnotable():
            notablepersons.append(person)

-    return render_with_context(request,'personindex.html', {'persons': persons, 'personss':personss, 'notablepersons':notablepersons, })
+    return render(request,'personindex.html', {'persons': persons, 'personss':personss, 'notablepersons':notablepersons})

def expedition(request, expeditionname):
@@ -68,17 +67,20 @@ def expedition(request, expeditionname):
    for personexpedition in this_expedition.personexpedition_set.all():
        prow = [ ]
        for date in dates:
            pcell = { "persontrips": PersonTrip.objects.filter(personexpedition=personexpedition,
                                                               logbook_entry__date=date) }
            pcell["survexblocks"] = set(SurvexBlock.objects.filter(survexpersonrole__personexpedition=personexpedition,
-                                                                   date = date))
+                                                                   date=date))
            prow.append(pcell)
        personexpeditiondays.append({"personexpedition":personexpedition, "personrow":prow})

    message = ""
    if "reload" in request.GET:
-        message = LoadLogbookForExpedition(this_expedition)
-    return render_with_context(request,'expedition.html', {'expedition': this_expedition, 'expeditions':expeditions, 'personexpeditiondays':personexpeditiondays, 'message':message, 'settings':settings, 'dateditems': dateditems })
+        LoadLogbookForExpedition(this_expedition)
+    return render(request,'expedition.html', {'this_expedition': this_expedition,
+                                              'expeditions':expeditions,
+                                              'personexpeditiondays':personexpeditiondays,
+                                              'settings':settings,
+                                              'dateditems': dateditems })

def get_absolute_url(self):
    return ('expedition', (expedition.year))
@@ -95,39 +97,39 @@ class ExpeditionListView(ListView):
def person(request, first_name='', last_name='', ):
    this_person = Person.objects.get(first_name = first_name, last_name = last_name)

    # This is for removing the reference to the user's profile, in case they set it to the wrong person
    if request.method == 'GET':
        if request.GET.get('clear_profile')=='True':
            this_person.user=None
            this_person.save()
            return HttpResponseRedirect(reverse('profiles_select_profile'))

-    return render_with_context(request,'person.html', {'person': this_person, })
+    return render(request,'person.html', {'person': this_person, })

def GetPersonChronology(personexpedition):
    res = { }
    for persontrip in personexpedition.persontrip_set.all():
-        a = res.setdefault(persontrip.date, { })
+        a = res.setdefault(persontrip.logbook_entry.date, { })
        a.setdefault("persontrips", [ ]).append(persontrip)

    for personrole in personexpedition.survexpersonrole_set.all():
        a = res.setdefault(personrole.survexblock.date, { })
        a.setdefault("personroles", [ ]).append(personrole.survexblock)

    # build up the tables
    rdates = res.keys()
    rdates.sort()

    res2 = [ ]
    for rdate in rdates:
        persontrips = res[rdate].get("persontrips", [])
        personroles = res[rdate].get("personroles", [])
        for n in range(max(len(persontrips), len(personroles))):
            res2.append(((n == 0 and rdate or "--"), (n < len(persontrips) and persontrips[n]), (n < len(personroles) and personroles[n])))
    return res2
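One caveat for the ongoing Python 3 work: res.keys() only has a .sort() method on Python 2, where it returns a list. A hedged sketch of the form that behaves the same on both versions (the dictionary content is invented):

    res = {"2019-07-20": {}, "2019-07-18": {}}   # hypothetical date -> trips mapping
    rdates = sorted(res)                          # works on Python 2 and 3; dict views have no .sort()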
@@ -136,17 +138,17 @@ def personexpedition(request, first_name='', last_name='', year=''):
    this_expedition = Expedition.objects.get(year=year)
    personexpedition = person.personexpedition_set.get(expedition=this_expedition)
    personchronology = GetPersonChronology(personexpedition)
-    return render_with_context(request,'personexpedition.html', {'personexpedition': personexpedition, 'personchronology':personchronology})
+    return render(request,'personexpedition.html', {'personexpedition': personexpedition, 'personchronology':personchronology})

def logbookentry(request, date, slug):
    this_logbookentry = LogbookEntry.objects.filter(date=date, slug=slug)

    if len(this_logbookentry)>1:
-        return render_with_context(request, 'object_list.html',{'object_list':this_logbookentry})
+        return render(request, 'object_list.html',{'object_list':this_logbookentry})
    else:
        this_logbookentry=this_logbookentry[0]
-        return render_with_context(request, 'logbookentry.html', {'logbookentry': this_logbookentry})
+        return render(request, 'logbookentry.html', {'logbookentry': this_logbookentry})

def logbookSearch(request, extra):
@@ -157,14 +159,14 @@ def logbookSearch(request, extra):
        entry_query = search.get_query(query_string, ['text','title',])
        found_entries = LogbookEntry.objects.filter(entry_query)

-    return render_with_context(request,'logbooksearch.html',
+    return render(request,'logbooksearch.html',
                  { 'query_string': query_string, 'found_entries': found_entries, })
                  #context_instance=RequestContext(request))

def personForm(request,pk):
    person=Person.objects.get(pk=pk)
    form=PersonForm(instance=person)
-    return render_with_context(request,'personform.html', {'form':form,})
+    return render(request,'personform.html', {'form':form,})

def experimental(request):
@@ -178,10 +180,10 @@ def experimental(request):
            survexleglength += survexblock.totalleglength
        legsbyexpo.append((expedition, {"nsurvexlegs":len(survexlegs), "survexleglength":survexleglength}))
    legsbyexpo.reverse()

    survexlegs = models.SurvexLeg.objects.all()
    totalsurvexlength = sum([survexleg.tape for survexleg in survexlegs])
-    return render_with_context(request, 'experimental.html', { "nsurvexlegs":len(survexlegs), "totalsurvexlength":totalsurvexlength, "legsbyexpo":legsbyexpo })
+    return render(request, 'experimental.html', { "nsurvexlegs":len(survexlegs), "totalsurvexlength":totalsurvexlength, "legsbyexpo":legsbyexpo })

@login_required_if_public
def newLogbookEntry(request, expeditionyear, pdate = None, pslug = None):
@@ -196,11 +198,11 @@ def newLogbookEntry(request, expeditionyear, pdate = None, pslug = None):
        personTripFormSet = PersonTripFormSet(request.POST)
        if tripForm.is_valid() and personTripFormSet.is_valid():  # All validation rules pass
            dateStr = tripForm.cleaned_data["date"].strftime("%Y-%m-%d")
            directory = os.path.join(settings.EXPOWEB,
                                     "years",
                                     expedition.year,
                                     "autologbook")
            filename = os.path.join(directory,
                                    dateStr + "." + slugify(tripForm.cleaned_data["title"])[:50] + ".html")
            if not os.path.isdir(directory):
                os.mkdir(directory)
@@ -208,7 +210,7 @@ def newLogbookEntry(request, expeditionyear, pdate = None, pslug = None):
                delLogbookEntry(previouslbe)
            f = open(filename, "w")
            template = loader.get_template('dataformat/logbookentry.html')
            context = Context({'trip': tripForm.cleaned_data,
                               'persons': personTripFormSet.cleaned_data,
                               'date': dateStr,
                               'expeditionyear': expeditionyear})
@@ -232,15 +234,15 @@ def newLogbookEntry(request, expeditionyear, pdate = None, pslug = None):
                                     "location": previouslbe.place,
                                     "caveOrLocation": "location",
                                     "html": previouslbe.text})
            personTripFormSet = PersonTripFormSet(initial=[{"name": get_name(py.personexpedition),
                                                            "TU": py.time_underground,
                                                            "author": py.is_logbook_entry_author}
                                                           for py in previouslbe.persontrip_set.all()])
        else:
            tripForm = TripForm()  # An unbound form
            personTripFormSet = PersonTripFormSet()
-    return render_with_context(request, 'newlogbookentry.html', {
+    return render(request, 'newlogbookentry.html', {
        'tripForm': tripForm,
        'personTripFormSet': personTripFormSet,
@@ -262,9 +264,8 @@ def delLogbookEntry(lbe):
def get_people(request, expeditionslug):
    exp = Expedition.objects.get(year = expeditionslug)
-    return render_with_context(request,'options.html', {"items": [(pe.slug, pe.name) for pe in exp.personexpedition_set.all()]})
+    return render(request,'options.html', {"items": [(pe.slug, pe.name) for pe in exp.personexpedition_set.all()]})

def get_logbook_entries(request, expeditionslug):
    exp = Expedition.objects.get(year = expeditionslug)
-    return render_with_context(request,'options.html', {"items": [(le.slug, "%s - %s" % (le.date, le.title)) for le in exp.logbookentry_set.all()]})
+    return render(request,'options.html', {"items": [(le.slug, "%s - %s" % (le.date, le.title)) for le in exp.logbookentry_set.all()]})

View File

@@ -4,11 +4,11 @@ from django.conf import settings
from django import forms
from django.template import loader, Context
from django.db.models import Q
from django.shortcuts import render
import databaseReset
import re
from django.http import HttpResponse, HttpResponseRedirect
from django.core.urlresolvers import reverse
from utils import render_with_context
from troggle.core.models import *
from troggle.helper import login_required_if_public
@@ -21,21 +21,21 @@ def stats(request):
    statsDict['caveCount'] = int(Cave.objects.count())
    statsDict['personCount'] = int(Person.objects.count())
    statsDict['logbookEntryCount'] = int(LogbookEntry.objects.count())
-    return render_with_context(request,'statistics.html', statsDict)
+    return render(request,'statistics.html', statsDict)

def frontpage(request):
    if request.user.is_authenticated():
-        return render_with_context(request,'tasks.html')
+        return render(request,'tasks.html')
    expeditions = Expedition.objects.order_by("-year")
    logbookentry = LogbookEntry
    cave = Cave
    photo = DPhoto
    from django.contrib.admin.templatetags import log
-    return render_with_context(request,'frontpage.html', locals())
+    return render(request,'frontpage.html', locals())

def todo(request):
    message = "no test message" #reverse('personn', kwargs={"name":"hkjhjh"})
    if "reloadexpos" in request.GET:
        message = LoadPersonsExpos()
        message = "Reloaded personexpos"
@@ -45,47 +45,47 @@ def todo(request):
expeditions = Expedition.objects.order_by("-year") expeditions = Expedition.objects.order_by("-year")
totallogbookentries = LogbookEntry.objects.count() totallogbookentries = LogbookEntry.objects.count()
return render_with_context(request,'index.html', {'expeditions':expeditions, 'all':'all', 'totallogbookentries':totallogbookentries, "message":message}) return render(request,'index.html', {'expeditions':expeditions, 'all':'all', 'totallogbookentries':totallogbookentries, "message":message})
def controlPanel(request): def controlPanel(request):
jobs_completed=[] jobs_completed=[]
if request.method=='POST': if request.method=='POST':
if request.user.is_superuser: if request.user.is_superuser:
#importlist is mostly here so that things happen in the correct order. #importlist is mostly here so that things happen in the correct order.
#http post data seems to come in an unpredictable order, so we do it this way. #http post data seems to come in an unpredictable order, so we do it this way.
importlist=['reload_db', 'import_people', 'import_cavetab', 'import_logbooks', 'import_surveys', 'import_QMs'] importlist=['reload_db', 'import_people', 'import_cavetab', 'import_logbooks', 'import_surveys', 'import_QMs']
databaseReset.make_dirs() databaseReset.make_dirs()
for item in importlist: for item in importlist:
if item in request.POST: if item in request.POST:
print "running"+ " databaseReset."+item+"()" print("running"+ " databaseReset."+item+"()")
exec "databaseReset."+item+"()" exec("databaseReset."+item+"()")
jobs_completed.append(item) jobs_completed.append(item)
else: else:
if request.user.is_authenticated(): #The user is logged in, but is not a superuser. if request.user.is_authenticated(): #The user is logged in, but is not a superuser.
return render_with_context(request,'controlPanel.html', {'caves':Cave.objects.all(),'error':'You must be a superuser to use that feature.'}) return render(request,'controlPanel.html', {'caves':Cave.objects.all(),'error':'You must be a superuser to use that feature.'})
else: else:
return HttpResponseRedirect(reverse('auth_login')) return HttpResponseRedirect(reverse('auth_login'))
return render_with_context(request,'controlPanel.html', {'caves':Cave.objects.all(),'expeditions':Expedition.objects.all(),'jobs_completed':jobs_completed}) return render(request,'controlPanel.html', {'caves':Cave.objects.all(),'expeditions':Expedition.objects.all(),'jobs_completed':jobs_completed})
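The import buttons in controlPanel are dispatched by exec() on a built-up string. An equivalent lookup with getattr (a sketch, not what this commit does) keeps the fixed ordering the comment asks for while avoiding exec:

    import databaseReset

    IMPORTLIST = ['reload_db', 'import_people', 'import_cavetab',
                  'import_logbooks', 'import_surveys', 'import_QMs']

    def run_selected_imports(post_data):
        """Run the requested import steps, always in IMPORTLIST order,
        because HTTP POST keys arrive in no predictable order."""
        completed = []
        for item in IMPORTLIST:
            if item in post_data:
                getattr(databaseReset, item)()   # e.g. databaseReset.import_people()
                completed.append(item)
        return completed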
def downloadCavetab(request): def downloadCavetab(request):
from export import tocavetab from export import tocavetab
response = HttpResponse(mimetype='text/csv') response = HttpResponse(content_type='text/csv')
response['Content-Disposition'] = 'attachment; filename=CAVETAB2.CSV' response['Content-Disposition'] = 'attachment; filename=CAVETAB2.CSV'
tocavetab.writeCaveTab(response) tocavetab.writeCaveTab(response)
return response return response
def downloadSurveys(request): def downloadSurveys(request):
from export import tosurveys from export import tosurveys
response = HttpResponse(mimetype='text/csv') response = HttpResponse(content_type='text/csv')
response['Content-Disposition'] = 'attachment; filename=Surveys.csv' response['Content-Disposition'] = 'attachment; filename=Surveys.csv'
tosurveys.writeCaveTab(response) tosurveys.writeCaveTab(response)
return response return response
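Every HttpResponse in these download views moves from the removed mimetype= keyword to content_type=, the only spelling Django 1.7+ accepts. The attachment pattern they all share, pulled out as a small sketch:

    from django.http import HttpResponse

    def csv_attachment(filename):
        # Django >= 1.7: content_type replaces the old mimetype keyword
        response = HttpResponse(content_type='text/csv')
        response['Content-Disposition'] = 'attachment; filename=' + filename
        return response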
def downloadLogbook(request,year=None,extension=None,queryset=None): def downloadLogbook(request,year=None,extension=None,queryset=None):
if year: if year:
current_expedition=Expedition.objects.get(year=year) current_expedition=Expedition.objects.get(year=year)
logbook_entries=LogbookEntry.objects.filter(expedition=current_expedition) logbook_entries=LogbookEntry.objects.filter(expedition=current_expedition)
@@ -94,29 +94,28 @@ def downloadLogbook(request,year=None,extension=None,queryset=None):
logbook_entries=queryset logbook_entries=queryset
filename='logbook' filename='logbook'
else: else:
response = HttpResponse(content_type='text/plain')
return response(r"Error: Logbook downloader doesn't know what year you want") return response(r"Error: Logbook downloader doesn't know what year you want")
if 'year' in request.GET: if 'year' in request.GET:
year=request.GET['year'] year=request.GET['year']
if 'extension' in request.GET: if 'extension' in request.GET:
extension=request.GET['extension'] extension=request.GET['extension']
if extension =='txt': if extension =='txt':
response = HttpResponse(mimetype='text/plain') response = HttpResponse(content_type='text/plain')
style='2008' style='2008'
elif extension == 'html': elif extension == 'html':
response = HttpResponse(mimetype='text/html') response = HttpResponse(content_type='text/html')
style='2005' style='2005'
template='logbook'+style+'style.'+extension template='logbook'+style+'style.'+extension
response['Content-Disposition'] = 'attachment; filename='+filename+'.'+extension response['Content-Disposition'] = 'attachment; filename='+filename+'.'+extension
t=loader.get_template(template) t=loader.get_template(template)
c=Context({'logbook_entries':logbook_entries}) c=Context({'logbook_entries':logbook_entries})
response.write(t.render(c)) response.write(t.render(c))
return response return response
def downloadQMs(request): def downloadQMs(request):
# Note to self: use get_cave method for the below # Note to self: use get_cave method for the below
@@ -124,29 +123,29 @@ def downloadQMs(request):
try: try:
cave=Cave.objects.get(kataster_number=request.GET['cave_id']) cave=Cave.objects.get(kataster_number=request.GET['cave_id'])
except Cave.DoesNotExist: except Cave.DoesNotExist:
cave=Cave.objects.get(name=cave_id) cave=Cave.objects.get(name=request.GET['cave_id'])
from export import toqms from export import toqms
response = HttpResponse(mimetype='text/csv') response = HttpResponse(content_type='text/csv')
response['Content-Disposition'] = 'attachment; filename=qm.csv' response['Content-Disposition'] = 'attachment; filename=qm.csv'
toqms.writeQmTable(response,cave) toqms.writeQmTable(response,cave)
return response return response
def ajax_test(request): def ajax_test(request):
post_text = request.POST['post_data'] post_text = request.POST['post_data']
return HttpResponse("{'response_text': '"+post_text+" recieved.'}", return HttpResponse("{'response_text': '"+post_text+" recieved.'}",
mimetype="application/json") content_type="application/json")
def eyecandy(request): def eyecandy(request):
return return
def ajax_QM_number(request): def ajax_QM_number(request):
if request.method=='POST': if request.method=='POST':
cave=Cave.objects.get(id=request.POST['cave']) cave=Cave.objects.get(id=request.POST['cave'])
print cave print(cave)
exp=Expedition.objects.get(pk=request.POST['year']) exp=Expedition.objects.get(pk=request.POST['year'])
print exp print(exp)
res=cave.new_QM_number(exp.year) res=cave.new_QM_number(exp.year)
return HttpResponse(res) return HttpResponse(res)
@@ -159,15 +158,15 @@ def logbook_entry_suggestions(request):
unwiki_QM_pattern=r"(?P<whole>(?P<explorer_code>[ABC]?)(?P<cave>\d*)-?(?P<year>\d\d\d?\d?)-(?P<number>\d\d)(?P<grade>[ABCDXV]?))" unwiki_QM_pattern=r"(?P<whole>(?P<explorer_code>[ABC]?)(?P<cave>\d*)-?(?P<year>\d\d\d?\d?)-(?P<number>\d\d)(?P<grade>[ABCDXV]?))"
unwiki_QM_pattern=re.compile(unwiki_QM_pattern) unwiki_QM_pattern=re.compile(unwiki_QM_pattern)
#wikilink_QM_pattern=settings.QM_PATTERN #wikilink_QM_pattern=settings.QM_PATTERN
slug=request.POST['slug'] slug=request.POST['slug']
date=request.POST['date'] date=request.POST['date']
lbo=LogbookEntry.objects.get(slug=slug, date=date) lbo=LogbookEntry.objects.get(slug=slug, date=date)
#unwiki_QMs=re.findall(unwiki_QM_pattern,lbo.text) #unwiki_QMs=re.findall(unwiki_QM_pattern,lbo.text)
unwiki_QMs=[m.groupdict() for m in unwiki_QM_pattern.finditer(lbo.text)] unwiki_QMs=[m.groupdict() for m in unwiki_QM_pattern.finditer(lbo.text)]
print unwiki_QMs print(unwiki_QMs)
for qm in unwiki_QMs: for qm in unwiki_QMs:
#try: #try:
if len(qm['year'])==2: if len(qm['year'])==2:
@@ -180,29 +179,29 @@ def logbook_entry_suggestions(request):
try: try:
lbo=LogbookEntry.objects.get(date__year=qm['year'],title__icontains="placeholder for QMs in") lbo=LogbookEntry.objects.get(date__year=qm['year'],title__icontains="placeholder for QMs in")
except: except:
print "failed to get placeholder for year "+str(qm['year']) print("failed to get placeholder for year "+str(qm['year']))
temp_QM=QM(found_by=lbo,number=qm['number'],grade=qm['grade']) temp_QM=QM(found_by=lbo,number=qm['number'],grade=qm['grade'])
temp_QM.grade=qm['grade'] temp_QM.grade=qm['grade']
qm['wikilink']=temp_QM.wiki_link() qm['wikilink']=temp_QM.wiki_link()
#except: #except:
#print 'failed' #print 'failed'
print unwiki_QMs print(unwiki_QMs)
#wikilink_QMs=re.findall(wikilink_QM_pattern,lbo.text) #wikilink_QMs=re.findall(wikilink_QM_pattern,lbo.text)
attached_QMs=lbo.QMs_found.all() attached_QMs=lbo.QMs_found.all()
unmentioned_attached_QMs=''#not implemented, fill this in by subtracting wiklink_QMs from attached_QMs unmentioned_attached_QMs=''#not implemented, fill this in by subtracting wiklink_QMs from attached_QMs
#Find unattached_QMs. We only look at the QMs with a proper wiki link. #Find unattached_QMs. We only look at the QMs with a proper wiki link.
#for qm in wikilink_QMs: #for qm in wikilink_QMs:
#Try to look up the QM. #Try to look up the QM.
print 'got 208' print('got 208')
any_suggestions=True any_suggestions=True
print 'got 210' print('got 210')
return render_with_context(request,'suggestions.html', return render(request,'suggestions.html',
{ {
'unwiki_QMs':unwiki_QMs, 'unwiki_QMs':unwiki_QMs,
'any_suggestions':any_suggestions 'any_suggestions':any_suggestions
@@ -218,11 +217,11 @@ def newFile(request, pslug = None):
# personTripFormSet = PersonTripFormSet(request.POST) # personTripFormSet = PersonTripFormSet(request.POST)
# if tripForm.is_valid() and personTripFormSet.is_valid(): # All validation rules pass # if tripForm.is_valid() and personTripFormSet.is_valid(): # All validation rules pass
# dateStr = tripForm.cleaned_data["date"].strftime("%Y-%m-%d") # dateStr = tripForm.cleaned_data["date"].strftime("%Y-%m-%d")
# directory = os.path.join(settings.EXPOWEB, # directory = os.path.join(settings.EXPOWEB,
# "years", # "years",
# expedition.year, # expedition.year,
# "autologbook") # "autologbook")
# filename = os.path.join(directory, # filename = os.path.join(directory,
# dateStr + "." + slugify(tripForm.cleaned_data["title"])[:50] + ".html") # dateStr + "." + slugify(tripForm.cleaned_data["title"])[:50] + ".html")
# if not os.path.isdir(directory): # if not os.path.isdir(directory):
# os.mkdir(directory) # os.mkdir(directory)
@@ -230,7 +229,7 @@ def newFile(request, pslug = None):
# delLogbookEntry(previouslbe) # delLogbookEntry(previouslbe)
# f = open(filename, "w") # f = open(filename, "w")
# template = loader.get_template('dataformat/logbookentry.html') # template = loader.get_template('dataformat/logbookentry.html')
# context = Context({'trip': tripForm.cleaned_data, # context = Context({'trip': tripForm.cleaned_data,
# 'persons': personTripFormSet.cleaned_data, # 'persons': personTripFormSet.cleaned_data,
# 'date': dateStr, # 'date': dateStr,
# 'expeditionyear': expeditionyear}) # 'expeditionyear': expeditionyear})
@@ -255,14 +254,14 @@ def newFile(request, pslug = None):
# "location": previouslbe.place, # "location": previouslbe.place,
# "caveOrLocation": "location", # "caveOrLocation": "location",
# "html": previouslbe.text}) # "html": previouslbe.text})
# personTripFormSet = PersonTripFormSet(initial=[{"name": get_name(py.personexpedition), # personTripFormSet = PersonTripFormSet(initial=[{"name": get_name(py.personexpedition),
# "TU": py.time_underground, # "TU": py.time_underground,
# "author": py.is_logbook_entry_author} # "author": py.is_logbook_entry_author}
# for py in previouslbe.persontrip_set.all()]) # for py in previouslbe.persontrip_set.all()])
# else: # else:
# fileform = UploadFileForm() # An unbound form # fileform = UploadFileForm() # An unbound form
return render_with_context(request, 'editfile.html', { return render(request, 'editfile.html', {
'fileForm': fileform, 'fileForm': fileform,
}) })

View File

@@ -1,6 +1,8 @@
from django import forms from django import forms
from django.http import HttpResponseRedirect, HttpResponse from django.http import HttpResponseRedirect, HttpResponse
from django.shortcuts import render_to_response from django.shortcuts import render
from django.views.decorators import csrf
from django.views.decorators.csrf import csrf_protect
from django.http import HttpResponse, Http404 from django.http import HttpResponse, Http404
import re import re
import os import os
@@ -15,7 +17,7 @@ import troggle.settings as settings
import parsers.survex import parsers.survex
survextemplatefile = """; Locn: Totes Gebirge, Austria - Loser/Augst-Eck Plateau (kataster group 1623) survextemplatefile = """; Locn: Totes Gebirge, Austria - Loser/Augst-Eck Plateau (kataster group 1623)
; Cave: ; Cave:
*begin [surveyname] *begin [surveyname]
@@ -38,65 +40,9 @@ survextemplatefile = """; Locn: Totes Gebirge, Austria - Loser/Augst-Eck Plateau
*data passage station left right up down ignoreall *data passage station left right up down ignoreall
1 [L] [R] [U] [D] comment 1 [L] [R] [U] [D] comment
*end [surveyname]""" *end [surveyname]"""
def millenialcaves(request):
cavesdir = os.path.join(settings.SURVEX_DATA, "caves-1623")
#cavesdircontents = { }
onefilecaves = [ ]
multifilecaves = [ ]
subdircaves = [ ]
millenialcaves = [ ]
# go through the list and identify the contents of each cave directory
for cavedir in os.listdir(cavesdir):
if cavedir in ["144", "40"]: #????? RW
continue
gcavedir = os.path.join(cavesdir, cavedir) #directory od 'large' cave
if os.path.isdir(gcavedir) and cavedir[0] != ".":
subdirs, subsvx = identifycavedircontents(gcavedir)
survdirobj = [ ]
for lsubsvx in subsvx:
survdirobj.append(("caves-1623/"+cavedir+"/"+lsubsvx, lsubsvx))
# caves with subdirectories
if subdirs:
subsurvdirs = [ ]
for subdir in subdirs:
dsubdirs, dsubsvx = identifycavedircontents(os.path.join(gcavedir, subdir))
assert not dsubdirs
lsurvdirobj = [ ]
for lsubsvx in dsubsvx:
lsurvdirobj.append(("caves-1623/"+cavedir+"/"+subdir+"/"+lsubsvx, lsubsvx))
subsurvdirs.append((lsurvdirobj[0], lsurvdirobj[1:]))
subdircaves.append((cavedir, (survdirobj[0], survdirobj[1:]), subsurvdirs))
# multifile caves
elif len(survdirobj) > 1:
multifilecaves.append((survdirobj[0], survdirobj[1:]))
# single file caves
else:
#print("survdirobj = ")
#print(survdirobj)
onefilecaves.append(survdirobj[0])
caves = Cave.objects.all()
return render_to_response('millenialcaves.html', {'settings': settings , 'caves':caves , "onefilecaves":onefilecaves, "multifilecaves":multifilecaves, "subdircaves":subdircaves })
def ReplaceTabs(stext): def ReplaceTabs(stext):
res = [ ] res = [ ]
nsl = 0 nsl = 0
@@ -119,7 +65,7 @@ class SvxForm(forms.Form):
datetime = forms.DateTimeField(widget=forms.TextInput(attrs={"readonly":True})) datetime = forms.DateTimeField(widget=forms.TextInput(attrs={"readonly":True}))
outputtype = forms.CharField(widget=forms.TextInput(attrs={"readonly":True})) outputtype = forms.CharField(widget=forms.TextInput(attrs={"readonly":True}))
code = forms.CharField(widget=forms.Textarea(attrs={"cols":150, "rows":18})) code = forms.CharField(widget=forms.Textarea(attrs={"cols":150, "rows":18}))
def GetDiscCode(self): def GetDiscCode(self):
fname = settings.SURVEX_DATA + self.data['filename'] + ".svx" fname = settings.SURVEX_DATA + self.data['filename'] + ".svx"
if not os.path.isfile(fname): if not os.path.isfile(fname):
@@ -129,7 +75,7 @@ class SvxForm(forms.Form):
svxtext = ReplaceTabs(svxtext).strip() svxtext = ReplaceTabs(svxtext).strip()
fin.close() fin.close()
return svxtext return svxtext
def DiffCode(self, rcode): def DiffCode(self, rcode):
code = self.GetDiscCode() code = self.GetDiscCode()
difftext = difflib.unified_diff(code.splitlines(), rcode.splitlines()) difftext = difflib.unified_diff(code.splitlines(), rcode.splitlines())
@@ -140,14 +86,14 @@ class SvxForm(forms.Form):
fname = settings.SURVEX_DATA + self.data['filename'] + ".svx" fname = settings.SURVEX_DATA + self.data['filename'] + ".svx"
if not os.path.isfile(fname): if not os.path.isfile(fname):
# only save if appears valid # only save if appears valid
if re.search(r"\[|\]", rcode): if re.search(r"\[|\]", rcode):
return "Error: clean up all []s from the text" return "Error: clean up all []s from the text"
mbeginend = re.search(r"(?s)\*begin\s+(\w+).*?\*end\s+(\w+)", rcode) mbeginend = re.search(r"(?s)\*begin\s+(\w+).*?\*end\s+(\w+)", rcode)
if not mbeginend: if not mbeginend:
return "Error: no begin/end block here" return "Error: no begin/end block here"
if mbeginend.group(1) != mbeginend.group(2): if mbeginend.group(1) != mbeginend.group(2):
return "Error: mismatching beginend" return "Error: mismatching beginend"
fout = open(fname, "w") fout = open(fname, "w")
res = fout.write(rcode.encode("latin1")) res = fout.write(rcode.encode("latin1"))
fout.close() fout.close()
@@ -165,28 +111,28 @@ class SvxForm(forms.Form):
log = re.sub("(?s).*?(Survey contains)", "\\1", log) log = re.sub("(?s).*?(Survey contains)", "\\1", log)
return log return log
@csrf_protect
def svx(request, survex_file): def svx(request, survex_file):
# get the basic data from the file given in the URL # get the basic data from the file given in the URL
dirname = os.path.split(survex_file)[0] dirname = os.path.split(survex_file)[0]
dirname += "/" dirname += "/"
nowtime = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S') nowtime = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
outputtype = "normal" outputtype = "normal"
form = SvxForm({'filename':survex_file, 'dirname':dirname, 'datetime':nowtime, 'outputtype':outputtype}) form = SvxForm({'filename':survex_file, 'dirname':dirname, 'datetime':nowtime, 'outputtype':outputtype})
# if the form has been returned # if the form has been returned
difflist = [ ] difflist = [ ]
logmessage = "" logmessage = ""
message = "" message = ""
if request.method == 'POST': # If the form has been submitted... if request.method == 'POST': # If the form has been submitted...
rform = SvxForm(request.POST) # rform = SvxForm(request.POST) #
if rform.is_valid(): # All validation rules pass (how do we check it against the filename and users?) if rform.is_valid(): # All validation rules pass (how do we check it against the filename and users?)
rcode = rform.cleaned_data['code'] rcode = rform.cleaned_data['code']
outputtype = rform.cleaned_data['outputtype'] outputtype = rform.cleaned_data['outputtype']
difflist = form.DiffCode(rcode) difflist = form.DiffCode(rcode)
#print "ssss", rform.data #print "ssss", rform.data
if "revert" in rform.data: if "revert" in rform.data:
pass pass
if "process" in rform.data: if "process" in rform.data:
@@ -207,20 +153,20 @@ def svx(request, survex_file):
form.data['code'] = rcode form.data['code'] = rcode
if "diff" in rform.data: if "diff" in rform.data:
form.data['code'] = rcode form.data['code'] = rcode
#process(survex_file) #process(survex_file)
if 'code' not in form.data: if 'code' not in form.data:
form.data['code'] = form.GetDiscCode() form.data['code'] = form.GetDiscCode()
if not difflist: if not difflist:
difflist.append("none") difflist.append("none")
if message: if message:
difflist.insert(0, message) difflist.insert(0, message)
#print [ form.data['code'] ] #print [ form.data['code'] ]
svxincludes = re.findall(r'\*include\s+(\S+)(?i)', form.data['code'] or "") svxincludes = re.findall(r'\*include\s+(\S+)(?i)', form.data['code'] or "")
vmap = {'settings': settings, vmap = {'settings': settings,
'has_3d': os.path.isfile(settings.SURVEX_DATA + survex_file + ".3d"), 'has_3d': os.path.isfile(settings.SURVEX_DATA + survex_file + ".3d"),
'title': survex_file, 'title': survex_file,
@@ -228,13 +174,14 @@ def svx(request, survex_file):
'difflist': difflist, 'difflist': difflist,
'logmessage':logmessage, 'logmessage':logmessage,
'form':form} 'form':form}
# vmap.update(csrf(request))
if outputtype == "ajax": if outputtype == "ajax":
return render_to_response('svxfiledifflistonly.html', vmap) return render(request, 'svxfiledifflistonly.html', vmap)
return render_to_response('svxfile.html', vmap) return render(request, 'svxfile.html', vmap)
def svxraw(request, survex_file): def svxraw(request, survex_file):
svx = open(os.path.join(settings.SURVEX_DATA, survex_file+".svx"), "rb") svx = open(os.path.join(settings.SURVEX_DATA, survex_file+".svx"), "rb")
return HttpResponse(svx, mimetype="text") return HttpResponse(svx, content_type="text")
# The cavern running function # The cavern running function
@@ -249,22 +196,22 @@ def threed(request, survex_file):
process(survex_file) process(survex_file)
try: try:
threed = open(settings.SURVEX_DATA + survex_file + ".3d", "rb") threed = open(settings.SURVEX_DATA + survex_file + ".3d", "rb")
return HttpResponse(threed, mimetype="model/3d") return HttpResponse(threed, content_type="model/3d")
except: except:
log = open(settings.SURVEX_DATA + survex_file + ".log", "rb") log = open(settings.SURVEX_DATA + survex_file + ".log", "rb")
return HttpResponse(log, mimetype="text") return HttpResponse(log, content_type="text")
def log(request, survex_file): def log(request, survex_file):
process(survex_file) process(survex_file)
log = open(settings.SURVEX_DATA + survex_file + ".log", "rb") log = open(settings.SURVEX_DATA + survex_file + ".log", "rb")
return HttpResponse(log, mimetype="text") return HttpResponse(log, content_type="text")
def err(request, survex_file): def err(request, survex_file):
process(survex_file) process(survex_file)
err = open(settings.SURVEX_DATA + survex_file + ".err", "rb") err = open(settings.SURVEX_DATA + survex_file + ".err", "rb")
return HttpResponse(err, mimetype="text") return HttpResponse(err, content_type="text")
def identifycavedircontents(gcavedir): def identifycavedircontents(gcavedir):
@@ -280,13 +227,13 @@ def identifycavedircontents(gcavedir):
pass pass
elif name == "115" and (f in ["115cufix.svx", "115fix.svx"]): elif name == "115" and (f in ["115cufix.svx", "115fix.svx"]):
pass pass
elif os.path.isdir(os.path.join(gcavedir, f)): elif os.path.isdir(os.path.join(gcavedir, f)):
if f[0] != ".": if f[0] != ".":
subdirs.append(f) subdirs.append(f)
elif f[-4:] == ".svx": elif f[-4:] == ".svx":
nf = f[:-4] nf = f[:-4]
if nf.lower() == name.lower() or nf[:3] == "all" or (name, nf) in [("resurvey2005", "145-2005"), ("cucc", "cu115")]: if nf.lower() == name.lower() or nf[:3] == "all" or (name, nf) in [("resurvey2005", "145-2005"), ("cucc", "cu115")]:
if primesvx: if primesvx:
if nf[:3] == "all": if nf[:3] == "all":
@@ -306,38 +253,50 @@ def identifycavedircontents(gcavedir):
if primesvx: if primesvx:
subsvx.insert(0, primesvx) subsvx.insert(0, primesvx)
return subdirs, subsvx return subdirs, subsvx
# direct local non-database browsing through the svx file repositories # direct local non-database browsing through the svx file repositories
# perhaps should use the database and have a reload button for it # perhaps should use the database and have a reload button for it
def survexcaveslist(request): def survexcaveslist(request):
cavesdir = os.path.join(settings.SURVEX_DATA, "caves-1623") kat_areas = settings.KAT_AREAS
#cavesdircontents = { }
fnumlist = []
kat_areas = ['1623']
for area in kat_areas:
print(area)
cavesdir = os.path.join(settings.SURVEX_DATA, "caves-%s" % area)
print(cavesdir)
#cavesdircontents = { }
fnumlist += [ (-int(re.match(r"\d*", f).group(0) or "0"), f, area) for f in os.listdir(cavesdir) ]
print(fnumlist)
print(len(fnumlist))
# first sort the file list
fnumlist.sort()
onefilecaves = [ ] onefilecaves = [ ]
multifilecaves = [ ] multifilecaves = [ ]
subdircaves = [ ] subdircaves = [ ]
# first sort the file list
fnumlist = [ (-int(re.match(r"\d*", f).group(0) or "0"), f) for f in os.listdir(cavesdir) ]
fnumlist.sort()
print(fnumlist) print(fnumlist)
# go through the list and identify the contents of each cave directory # go through the list and identify the contents of each cave directory
for num, cavedir in fnumlist: for num, cavedir, area in fnumlist:
if cavedir in ["144", "40"]: if cavedir in ["144", "40"]:
continue continue
cavesdir = os.path.join(settings.SURVEX_DATA, "caves-%s" % area)
gcavedir = os.path.join(cavesdir, cavedir) gcavedir = os.path.join(cavesdir, cavedir)
if os.path.isdir(gcavedir) and cavedir[0] != ".": if os.path.isdir(gcavedir) and cavedir[0] != ".":
subdirs, subsvx = identifycavedircontents(gcavedir) subdirs, subsvx = identifycavedircontents(gcavedir)
survdirobj = [ ] survdirobj = [ ]
for lsubsvx in subsvx: for lsubsvx in subsvx:
survdirobj.append(("caves-1623/"+cavedir+"/"+lsubsvx, lsubsvx)) survdirobj.append(("caves-" + area + "/"+cavedir+"/"+lsubsvx, lsubsvx))
# caves with subdirectories # caves with subdirectories
if subdirs: if subdirs:
subsurvdirs = [ ] subsurvdirs = [ ]
@@ -346,10 +305,10 @@ def survexcaveslist(request):
assert not dsubdirs assert not dsubdirs
lsurvdirobj = [ ] lsurvdirobj = [ ]
for lsubsvx in dsubsvx: for lsubsvx in dsubsvx:
lsurvdirobj.append(("caves-1623/"+cavedir+"/"+subdir+"/"+lsubsvx, lsubsvx)) lsurvdirobj.append(("caves-" + area + "/"+cavedir+"/"+subdir+"/"+lsubsvx, lsubsvx))
subsurvdirs.append((lsurvdirobj[0], lsurvdirobj[1:])) subsurvdirs.append((lsurvdirobj[0], lsurvdirobj[1:]))
subdircaves.append((cavedir, (survdirobj[0], survdirobj[1:]), subsurvdirs)) subdircaves.append((cavedir, (survdirobj[0], survdirobj[1:]), subsurvdirs))
# multifile caves # multifile caves
elif len(survdirobj) > 1: elif len(survdirobj) > 1:
multifilecaves.append((survdirobj[0], survdirobj[1:])) multifilecaves.append((survdirobj[0], survdirobj[1:]))
@@ -358,24 +317,22 @@ def survexcaveslist(request):
#print("survdirobj = ") #print("survdirobj = ")
#print(survdirobj) #print(survdirobj)
onefilecaves.append(survdirobj[0]) onefilecaves.append(survdirobj[0])
return render_to_response('svxfilecavelist.html', {'settings': settings, "onefilecaves":onefilecaves, "multifilecaves":multifilecaves, "subdircaves":subdircaves })
return render(request, 'svxfilecavelist.html', {"onefilecaves":onefilecaves, "multifilecaves":multifilecaves, "subdircaves":subdircaves })
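survexcaveslist now walks every kataster area named in settings.KAT_AREAS instead of the hard-coded caves-1623 directory. A sketch of the setting and the (sortkey, dirname, area) list it drives; the area values and the SURVEX_DATA path here are illustrative, not taken from the repo:

    import os
    import re

    SURVEX_DATA = '/home/expo/loser/'   # hypothetical path, for illustration only
    KAT_AREAS = ['1623', '1626']        # hypothetical list of kataster areas

    fnumlist = []
    for area in KAT_AREAS:
        cavesdir = os.path.join(SURVEX_DATA, "caves-%s" % area)
        # sort key: leading digits of the directory name, negated so the
        # largest cave numbers come first after fnumlist.sort()
        fnumlist += [(-int(re.match(r"\d*", f).group(0) or "0"), f, area)
                     for f in os.listdir(cavesdir)]
    fnumlist.sort()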
# parsing all the survex files of a single cave and showing that it's consistent and can find all the files and people # parsing all the survex files of a single cave and showing that it's consistent and can find all the files and people
# doesn't use recursion. just writes it twice # doesn't use recursion. just writes it twice
def survexcavesingle(request, survex_cave): def survexcavesingle(request, survex_cave):
breload = False breload = False
cave = Cave.objects.get(kataster_number=survex_cave) cave = Cave.objects.filter(kataster_number=survex_cave)
if len(cave) < 1:
cave = Cave.objects.filter(unofficial_number=survex_cave)
if breload: if breload:
parsers.survex.ReloadSurvexCave(survex_cave) parsers.survex.ReloadSurvexCave(survex_cave)
return render_to_response('svxcavesingle.html', {'settings': settings, "cave":cave }) if len(cave) > 0:
return render(request, 'svxcavesingle.html', {"cave":cave[0] })
else:
return render(request, 'svxcavesingle.html', {"cave":cave })
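survexcavesingle now uses filter() rather than get(), so a cave known only by an unofficial number no longer raises DoesNotExist. The fallback, expressed as a standalone sketch:

    from troggle.core.models import Cave

    def lookup_cave(survex_cave):
        """Return the matching Cave, trying the kataster number first and the
        unofficial number second; None if neither matches (sketch only)."""
        caves = Cave.objects.filter(kataster_number=survex_cave)
        if not caves:
            caves = Cave.objects.filter(unofficial_number=survex_cave)
        return caves[0] if caves else None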

View File

@@ -3,6 +3,11 @@ import time
import settings import settings
os.environ['PYTHONPATH'] = settings.PYTHON_PATH os.environ['PYTHONPATH'] = settings.PYTHON_PATH
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'settings') os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'settings')
if __name__ == '__main__':
import django
django.setup()
from django.core import management from django.core import management
from django.db import connection from django.db import connection
from django.contrib.auth.models import User from django.contrib.auth.models import User
@@ -28,7 +33,7 @@ def reload_db():
cursor.execute("CREATE DATABASE %s" % databasename) cursor.execute("CREATE DATABASE %s" % databasename)
cursor.execute("ALTER DATABASE %s CHARACTER SET=utf8" % databasename) cursor.execute("ALTER DATABASE %s CHARACTER SET=utf8" % databasename)
cursor.execute("USE %s" % databasename) cursor.execute("USE %s" % databasename)
management.call_command('syncdb', interactive=False) management.call_command('migrate', interactive=False)
user = User.objects.create_user(expouser, expouseremail, expouserpass) user = User.objects.create_user(expouser, expouseremail, expouserpass)
user.is_staff = True user.is_staff = True
user.is_superuser = True user.is_superuser = True
@@ -42,7 +47,7 @@ def make_dirs():
def import_caves(): def import_caves():
import parsers.caves import parsers.caves
print("importing caves") print("Importing Caves")
parsers.caves.readcaves() parsers.caves.readcaves()
def import_people(): def import_people():
@@ -56,7 +61,7 @@ def import_logbooks():
settings.LOGFILE.write('\nBegun importing logbooks at ' + time.asctime() +'\n'+'-'*60) settings.LOGFILE.write('\nBegun importing logbooks at ' + time.asctime() +'\n'+'-'*60)
except: except:
pass pass
import parsers.logbooks import parsers.logbooks
parsers.logbooks.LoadLogbooks() parsers.logbooks.LoadLogbooks()
@@ -90,14 +95,16 @@ def reset():
import_caves() import_caves()
import_people() import_people()
import_surveyscans() import_surveyscans()
import_survex()
import_logbooks() import_logbooks()
import_QMs() import_QMs()
import_survex()
try: try:
import_tunnelfiles() import_tunnelfiles()
except: except:
print("Tunnel files parser broken.") print("Tunnel files parser broken.")
import_surveys() import_surveys()
@@ -109,10 +116,10 @@ def import_auto_logbooks():
for lbe in troggle.core.models.LogbookEntry.objects.all(): for lbe in troggle.core.models.LogbookEntry.objects.all():
lbe.delete() lbe.delete()
for expedition in troggle.core.models.Expedition.objects.all(): for expedition in troggle.core.models.Expedition.objects.all():
directory = os.path.join(settings.EXPOWEB, directory = os.path.join(settings.EXPOWEB,
"years", "years",
expedition.year, expedition.year,
"autologbook") "autologbook")
for root, dirs, filenames in os.walk(directory): for root, dirs, filenames in os.walk(directory):
for filename in filenames: for filename in filenames:
print(os.path.join(root, filename)) print(os.path.join(root, filename))
@@ -129,13 +136,13 @@ def dumplogbooks():
return pe.person.first_name return pe.person.first_name
for lbe in troggle.core.models.LogbookEntry.objects.all(): for lbe in troggle.core.models.LogbookEntry.objects.all():
dateStr = lbe.date.strftime("%Y-%m-%d") dateStr = lbe.date.strftime("%Y-%m-%d")
directory = os.path.join(settings.EXPOWEB, directory = os.path.join(settings.EXPOWEB,
"years", "years",
lbe.expedition.year, lbe.expedition.year,
"autologbook") "autologbook")
if not os.path.isdir(directory): if not os.path.isdir(directory):
os.mkdir(directory) os.mkdir(directory)
filename = os.path.join(directory, filename = os.path.join(directory,
dateStr + "." + slugify(lbe.title)[:50] + ".html") dateStr + "." + slugify(lbe.title)[:50] + ".html")
if lbe.cave: if lbe.cave:
print(lbe.cave.reference()) print(lbe.cave.reference())
@@ -146,7 +153,7 @@ def dumplogbooks():
persons = [{"name": get_name(pt.personexpedition), "TU": pt.time_underground, "author": pt.is_logbook_entry_author} for pt in pts] persons = [{"name": get_name(pt.personexpedition), "TU": pt.time_underground, "author": pt.is_logbook_entry_author} for pt in pts]
f = open(filename, "wb") f = open(filename, "wb")
template = loader.get_template('dataformat/logbookentry.html') template = loader.get_template('dataformat/logbookentry.html')
context = Context({'trip': trip, context = Context({'trip': trip,
'persons': persons, 'persons': persons,
'date': dateStr, 'date': dateStr,
'expeditionyear': lbe.expedition.year}) 'expeditionyear': lbe.expedition.year})
@@ -195,9 +202,6 @@ if __name__ == "__main__":
elif "scans" in sys.argv: elif "scans" in sys.argv:
import_surveyscans() import_surveyscans()
elif "caves" in sys.argv: elif "caves" in sys.argv:
reload_db()
make_dirs()
pageredirects()
import_caves() import_caves()
elif "people" in sys.argv: elif "people" in sys.argv:
import_people() import_people()
@@ -218,14 +222,14 @@ if __name__ == "__main__":
import_descriptions() import_descriptions()
parse_descriptions() parse_descriptions()
elif "survex" in sys.argv: elif "survex" in sys.argv:
management.call_command('syncdb', interactive=False) # this sets the path so that import settings works in import_survex # management.call_command('syncdb', interactive=False) # this sets the path so that import settings works in import_survex
import_survex() import_survex()
elif "survexpos" in sys.argv: elif "survexpos" in sys.argv:
management.call_command('syncdb', interactive=False) # this sets the path so that import settings works in import_survex # management.call_command('syncdb', interactive=False) # this sets the path so that import settings works in import_survex
import parsers.survex import parsers.survex
parsers.survex.LoadPos() parsers.survex.LoadPos()
elif "logbooks" in sys.argv: elif "logbooks" in sys.argv:
management.call_command('syncdb', interactive=False) # this sets the path so that import settings works in import_survex # management.call_command('syncdb', interactive=False) # this sets the path so that import settings works in import_survex
import_logbooks() import_logbooks()
elif "autologbooks" in sys.argv: elif "autologbooks" in sys.argv:
import_auto_logbooks() import_auto_logbooks()
@@ -237,10 +241,8 @@ if __name__ == "__main__":
import_surveys() import_surveys()
elif "help" in sys.argv: elif "help" in sys.argv:
usage() usage()
elif "reload_db" in sys.argv:
reload_db()
else: else:
print("%s not recognised" % sys.argv) print("%s not recognised" % sys.argv)
usage() usage()
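Two related changes in this file: syncdb is gone in Django 1.10, so the reset path calls migrate, and when the script is run directly it now calls django.setup() before touching the ORM. The minimal standalone-script boilerplate that implies, as a sketch (settings module name as in the file above):

    import os
    import django
    from django.core import management

    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'settings')

    if __name__ == '__main__':
        django.setup()   # must run before any ORM use outside manage.py
        management.call_command('migrate', interactive=False)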

View File

@@ -1,127 +0,0 @@
import os
import time
import settings
os.environ['PYTHONPATH'] = settings.PYTHON_PATH
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'settings')
from django.core import management
from django.db import connection
from django.contrib.auth.models import User
from django.http import HttpResponse
from django.core.urlresolvers import reverse
from troggle.core.models import Cave, Entrance
from troggle.core.models import PersonM, SurveyM, CaveM, ExpeditionM, Logbook_entryM, Cave_descriptionM
import troggle.flatpages.models
databasename=settings.DATABASES['default']['NAME']
expouser=settings.EXPOUSER
expouserpass=settings.EXPOUSERPASS
expouseremail=settings.EXPOUSER_EMAIL
def destroy():
if settings.DATABASES['default']['ENGINE'] == 'django.db.backends.sqlite3':
try:
os.remove(databasename)
except OSError:
pass
else:
cursor = connection.cursor()
cursor.execute("DROP DATABASE %s" % databasename)
cursor.execute("CREATE DATABASE %s" % databasename)
cursor.execute("ALTER DATABASE %s CHARACTER SET=utf8" % databasename)
cursor.execute("USE %s" % databasename)
management.call_command('syncdb', interactive=False)
user = User.objects.create_user(expouser, expouseremail, expouserpass)
user.is_staff = True
user.is_superuser = True
user.save()
print('Nuked the database and rebuilt it. You savage monster')
def gracefull_flush():
CaveM.objects.all().delete()
PersonM.objects.all().delete()
SurveyM.objects.all().delete()
ExpeditionM.objects.all().delete()
Logbook_entryM.objects.all().delete()
Cave_descriptionM.objects.all().delete()
print('Deleted contents of the database, ready to load new stuff :)')
def load_redirects():
for oldURL, newURL in [("indxal.htm", reverse("caveindex"))]:
f = troggle.flatpages.models.Redirect(originalURL = oldURL, newURL = newURL)
f.save()
def load_surveys():
SurveyM.objects.all().delete()
import troggle.parsers.surveysM
troggle.parsers.surveysM.load()
def load_caves():
import troggle.parsers.cavesM
troggle.parsers.cavesM.load()
def load_people():
import troggle.parsers.peopleM
troggle.parsers.peopleM.load()
def load_all():
load_caves()
load_surveys()
load_people()
load_redirects()
load_links()
print('Loaded everything. Your database is ready to go :)')
def help():
print("""Usage is 'python databaseResetM.py <command>'
where command is:
UNLOADERS:
gracefull_flush - flushes new (M-style) databases contents but keeps tables existing
destroy - destroys entire database and builds empty tables
LOADERS:
load_all - loads all tables and links
load_caves - loads all caves
load_surveys - loads all surveys (corresponds to .svx files)
load_people - loads all people
load_redirects - load page redirects
load_links - loads links between classes (run last! can't link non-existent things)
OTHER:
help - displays this page
----------------
This is a new version of database management written by RW 2019
----------------
""")
if __name__ == "__main__":
import troggle.core.models
import sys
import django
django.setup()
if "destroy" in sys.argv:
destroy()
elif "gracefull_flush" in sys.argv:
gracefull_flush()
elif "load_all" in sys.argv:
load_all()
elif "load_caves" in sys.argv:
load_caves()
elif "load_surveys" in sys.argv:
load_surveys()
elif "load_people" in sys.argv:
load_people()
elif "load_redirects" in sys.argv:
load_redirects()
elif "load_links" in sys.argv:
load_links()
elif "help" in sys.argv:
help()
else:
print("%s not recognised" % sys.argv)
help()

debian/serversetup (new file, 85 lines)
View File

@@ -0,0 +1,85 @@
Instructions for setting up new expo debian server/VM
For Debian Stretch, June 2019.
adduser expo
apt install openssh-server mosh tmux mc zile emacs-nox most ncdu
apt install python-django apache2 mysql-server survex make rsync
apt install libjs-openlayers make
apt install git mercurial mercurial-server?
for boe:
apt install libcgi-session-perl libcrypt-passwdmd5-perl libfile-slurp-perl libgit-wrapper-perl libhtml-template-perl libhtml-template-pro-perl libmime-lite-perl libtext-password-pronounceable-perl libtime-parsedate-perl libuuid-tiny-perl libcrypt-cracklib-perl
obsolete-packages:
bins (move to jigl?) (for photos)
python-django 1.7
backports: survex therion
not-packaged: caveview
make these dirs available at top documentroot:
cuccfiles
expofiles
loser (link to repo)
tunneldata (link to repo)
troggle (link to repo)
expoweb (link to repo)
boc/boe
config
containing:
setup apache configs for cucc and expo
#disable default website
a2dissite 000-default
a2ensite cucc
a2ensite expo
a2enmod cgid
Boe config:
Alias /boe /home/expo/boe/boc/boc.pl
<Directory /home/expo/boe/boc>
AddHandler cgi-script .pl
SetHandler cgi-script
Options +ExecCGI
Require all granted
</Directory>
And remember to set both program and data dir to be
www-data:www-data
(optionally make file group read/write by treasurer account)
create empty repo by clicking create in boe interface
then set names in 'settings'
Set up mysql (as root)
mysql -p
CREATE DATABASE troggle;
GRANT ALL PRIVILEGES ON troggle.* TO 'expo'@'localhost' IDENTIFIED BY 'somepassword';
install django:
sudo apt install python-django python-django-registration python-django-imagekit python-django-tinymce fonts-freefont-ttf libapache2-mod-wsgi
python-django-imagekit comes from https://salsa.debian.org/python-team/modules/python-django-imagekit
python-django-tinymce comes from https://salsa.debian.org/python-team/modules/python-django-tinymce
(both modified for stretch/python2). packages under /home/wookey/packages/
need fonts-freefont-ttf (to have truetype freesans available for troggle via PIL)
need libapache2-mod-wsgi for apache wsgi support.
On stretch the packaged django 1.10 is no use, so get rid of it:
apt remove python3-django python-django python-django-common python-django-doc
Then replace with django 1.7 (Needs to be built for stretch)
apt install python-django python-django-common python-django-doc
apt install python-django-registration python-django-imagekit python-django-tinymce
then hold them to stop them being upgraded by unattended upgrades:
echo "python-django hold" | sudo dpkg --set-selections
echo "python-django-common hold" | sudo dpkg --set-selections
echo "python-django-doc hold" | sudo dpkg --set-selections
#troggle has to have a writable logfile, otherwise the website explodes:
# 500 error on the server, and the apache error log fills with non-reentrant errors
create /var/log/troggle/troggle.log
chown www-data:adm /var/log/troggle/troggle.log
chmod 660 /var/log/troggle/troggle.log
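The logfile matters because the importers write progress through settings.LOGFILE (see the logbook import hunk above), so the path has to exist and be writable by www-data. A sketch of the matching settings entry; the exact spelling in localsettings is assumed from that usage:

    # localsettings sketch: troggle expects an already-open, writable file object
    LOGFILE = open('/var/log/troggle/troggle.log', 'a')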

View File

@@ -2,15 +2,17 @@ FROM python:2.7-stretch
#COPY backports.list /etc/apt/sources.list.d/ #COPY backports.list /etc/apt/sources.list.d/
RUN apt-get -y update && apt-get install -y mercurial fonts-freefont-ttf locales survex RUN apt-get -y update && apt-get install -y mercurial \
fonts-freefont-ttf locales survex python-levenshtein \
python-pygraphviz
#RUN apt-get -y -t -backports install survex #RUN apt-get -y -t -backports install survex
# Set the locale # Set the locale
RUN locale-gen en_GB.UTF-8 RUN locale-gen en_GB.UTF-8
ENV LANG en_GB.UTF-8 ENV LANG en_GB.UTF-8
ENV LANGUAGE en_GB:en ENV LANGUAGE en_GB:en
ENV LC_ALL en_GB.UTF-8 ENV LC_ALL en_GB.UTF-8
WORKDIR /opt/expo/troggle WORKDIR /opt/expo/troggle
COPY requirements.txt . COPY requirements.txt .

View File

@@ -1 +1 @@
requirements.txt.dj-1.7.11 requirements.txt.dj-1.10

View File

@@ -0,0 +1,13 @@
Django==1.10.8
django-registration==2.1.2
mysql
django-imagekit
Image
django-tinymce
smartencoding
fuzzywuzzy
GitPython
unidecode
django-extensions
pygraphviz
python-Levenshtein

View File

@@ -6,3 +6,7 @@ django-imagekit
Image Image
django-tinymce==2.7.0 django-tinymce==2.7.0
smartencoding smartencoding
fuzzywuzzy
GitPython
unidecode
django-extensions

View File

@@ -33,4 +33,3 @@ def writeQmTable(outfile,cave):
cavewriter.writerow(headers) cavewriter.writerow(headers)
for qm in cave.get_QMs(): for qm in cave.get_QMs():
cavewriter.writerow(qmRow(qm)) cavewriter.writerow(qmRow(qm))

View File

@@ -0,0 +1,34 @@
# -*- coding: utf-8 -*-
# Generated by Django 1.10.8 on 2020-02-18 16:01
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('core', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='EntranceRedirect',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('originalURL', models.CharField(max_length=200)),
('entrance', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='core.Entrance')),
],
),
migrations.CreateModel(
name='Redirect',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('originalURL', models.CharField(max_length=200, unique=True)),
('newURL', models.CharField(max_length=200)),
],
),
]
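For orientation, the models this initial flatpages migration creates tables for would look roughly as follows; this is reconstructed from the migration's field list, not copied from flatpages/models.py:

    from django.db import models
    from troggle.core.models import Entrance

    class Redirect(models.Model):
        originalURL = models.CharField(max_length=200, unique=True)
        newURL = models.CharField(max_length=200)

    class EntranceRedirect(models.Model):
        originalURL = models.CharField(max_length=200)
        entrance = models.ForeignKey(Entrance, on_delete=models.CASCADE)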

View File

View File

@@ -1,6 +1,6 @@
import troggle.settings as settings import troggle.settings as settings
from troggle.helper import login_required_if_public from troggle.helper import login_required_if_public
from utils import render_with_context from django.shortcuts import render
from django.http import HttpResponse, HttpResponseRedirect, Http404 from django.http import HttpResponse, HttpResponseRedirect, Http404
from django.core.urlresolvers import reverse from django.core.urlresolvers import reverse
@@ -33,12 +33,11 @@ def flatpage(request, path):
except EntranceRedirect.DoesNotExist: except EntranceRedirect.DoesNotExist:
pass pass
if path.startswith("noinfo") and settings.PUBLIC_SITE and not request.user.is_authenticated(): if path.startswith("noinfo") and settings.PUBLIC_SITE and not request.user.is_authenticated():
print("flat path noinfo", path) print("flat path noinfo", path)
return HttpResponseRedirect(reverse("auth_login") + '?next=%s' % request.path) return HttpResponseRedirect(reverse("auth_login") + '?next=%s' % request.path)
if path.endswith("/") or path == "": if path.endswith("/") or path == "":
try: try:
o = open(os.path.normpath(settings.EXPOWEB + path + "index.html"), "rb") o = open(os.path.normpath(settings.EXPOWEB + path + "index.html"), "rb")
path = path + "index.html" path = path + "index.html"
@@ -47,16 +46,16 @@ def flatpage(request, path):
o = open(os.path.normpath(settings.EXPOWEB + path + "index.htm"), "rb") o = open(os.path.normpath(settings.EXPOWEB + path + "index.htm"), "rb")
path = path + "index.htm" path = path + "index.htm"
except IOError: except IOError:
return render_with_context(request, 'pagenotfound.html', {'path': path}) return render(request, 'pagenotfound.html', {'path': path})
else: else:
try: try:
filetobeopened = os.path.normpath(settings.EXPOWEB + path) filetobeopened = os.path.normpath(settings.EXPOWEB + path)
o = open(filetobeopened, "rb") o = open(filetobeopened, "rb")
except IOError: except IOError:
return render_with_context(request, 'pagenotfound.html', {'path': path}) return render(request, 'pagenotfound.html', {'path': path})
if path.endswith(".htm") or path.endswith(".html"): if path.endswith(".htm") or path.endswith(".html"):
html = o.read() html = o.read()
m = re.search(r"(.*)<\s*head([^>]*)>(.*)<\s*/head\s*>(.*)<\s*body([^>]*)>(.*)<\s*/body\s*>(.*)", html, re.DOTALL + re.IGNORECASE) m = re.search(r"(.*)<\s*head([^>]*)>(.*)<\s*/head\s*>(.*)<\s*body([^>]*)>(.*)<\s*/body\s*>(.*)", html, re.DOTALL + re.IGNORECASE)
if m: if m:
preheader, headerattrs, head, postheader, bodyattrs, body, postbody = m.groups() preheader, headerattrs, head, postheader, bodyattrs, body, postbody = m.groups()
@@ -75,7 +74,7 @@ def flatpage(request, path):
if re.search(r"iso-8859-1", html): if re.search(r"iso-8859-1", html):
body = unicode(body, "iso-8859-1") body = unicode(body, "iso-8859-1")
body.strip body.strip
return render_with_context(request, 'flatpage.html', {'editable': True, 'path': path, 'title': title, 'body': body, 'homepage': (path == "index.htm"), 'has_menu': has_menu}) return render(request, 'flatpage.html', {'editable': True, 'path': path, 'title': title, 'body': body, 'homepage': (path == "index.htm"), 'has_menu': has_menu})
else: else:
return HttpResponse(o.read(), content_type=getmimetype(path)) return HttpResponse(o.read(), content_type=getmimetype(path))
@@ -125,7 +124,7 @@ def editflatpage(request, path):
return HttpResponse("Page could not be split into header and body") return HttpResponse("Page could not be split into header and body")
except IOError: except IOError:
filefound = False filefound = False
if request.method == 'POST': # If the form has been submitted... if request.method == 'POST': # If the form has been submitted...
flatpageForm = FlatPageForm(request.POST) # A form bound to the POST data flatpageForm = FlatPageForm(request.POST) # A form bound to the POST data
@@ -142,7 +141,7 @@ def editflatpage(request, path):
headerargs = "" headerargs = ""
postheader = "" postheader = ""
bodyargs = "" bodyargs = ""
postbody = "</html>" postbody = "</html>"
body = flatpageForm.cleaned_data["html"] body = flatpageForm.cleaned_data["html"]
body = body.replace("\r", "") body = body.replace("\r", "")
result = u"%s<head%s>%s</head>%s<body%s>\n%s</body>%s" % (preheader, headerargs, head, postheader, bodyargs, body, postbody) result = u"%s<head%s>%s</head>%s<body%s>\n%s</body>%s" % (preheader, headerargs, head, postheader, bodyargs, body, postbody)
@@ -153,16 +152,16 @@ def editflatpage(request, path):
else: else:
if filefound: if filefound:
m = re.search(r"<title>(.*)</title>", head, re.DOTALL + re.IGNORECASE) m = re.search(r"<title>(.*)</title>", head, re.DOTALL + re.IGNORECASE)
if m: if m:
title, = m.groups() title, = m.groups()
else: else:
title = "" title = ""
flatpageForm = FlatPageForm({"html": body, "title": title}) flatpageForm = FlatPageForm({"html": body, "title": title})
else: else:
flatpageForm = FlatPageForm() flatpageForm = FlatPageForm()
return render_with_context(request, 'editflatpage.html', {'path': path, 'form': flatpageForm, }) return render(request, 'editflatpage.html', {'path': path, 'form': flatpageForm, })
class FlatPageForm(forms.Form): class FlatPageForm(forms.Form):
title = forms.CharField(widget=forms.TextInput(attrs={'size':'60'})) title = forms.CharField(widget=forms.TextInput(attrs={'size':'60'}))
html = forms.CharField(widget=forms.Textarea()) html = forms.CharField(widget=TinyMCE(attrs={'cols': 80, 'rows': 20}))
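The widget swap relies on django-tinymce, which is re-added to the requirements above; the form then needs the matching import, roughly:

    from django import forms
    from tinymce.widgets import TinyMCE   # provided by the django-tinymce package

    class FlatPageForm(forms.Form):
        title = forms.CharField(widget=forms.TextInput(attrs={'size': '60'}))
        html = forms.CharField(widget=TinyMCE(attrs={'cols': 80, 'rows': 20}))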

View File

@@ -1,13 +0,0 @@
"""
Django ImageKit
Author: Justin Driscoll <justin.driscoll@gmail.com>
Version: 0.2
"""
VERSION = "0.2"

View File

@@ -1,21 +0,0 @@
""" Default ImageKit configuration """
from imagekit.specs import ImageSpec
from imagekit import processors
class ResizeThumbnail(processors.Resize):
width = 100
height = 50
crop = True
class EnhanceSmall(processors.Adjustment):
contrast = 1.2
sharpness = 1.1
class SampleReflection(processors.Reflection):
size = 0.5
background_color = "#000000"
class DjangoAdminThumbnail(ImageSpec):
access_as = 'admin_thumbnail'
processors = [ResizeThumbnail, EnhanceSmall, SampleReflection]

View File

@@ -1,17 +0,0 @@
# Required PIL classes may or may not be available from the root namespace
# depending on the installation method used.
try:
import Image
import ImageFile
import ImageFilter
import ImageEnhance
import ImageColor
except ImportError:
try:
from PIL import Image
from PIL import ImageFile
from PIL import ImageFilter
from PIL import ImageEnhance
from PIL import ImageColor
except ImportError:
raise ImportError('ImageKit was unable to import the Python Imaging Library. Please confirm it`s installed and available on your current Python path.')

View File

@@ -1 +0,0 @@

View File

@@ -1 +0,0 @@

View File

@@ -1,38 +0,0 @@
from django.db.models.loading import cache
from django.core.management.base import BaseCommand, CommandError
from optparse import make_option
from imagekit.models import ImageModel
from imagekit.specs import ImageSpec
class Command(BaseCommand):
help = ('Clears all ImageKit cached files.')
args = '[apps]'
requires_model_validation = True
can_import_settings = True
def handle(self, *args, **options):
return flush_cache(args, options)
def flush_cache(apps, options):
""" Clears the image cache
"""
apps = [a.strip(',') for a in apps]
if apps:
print 'Flushing cache for %s...' % ', '.join(apps)
else:
print 'Flushing caches...'
for app_label in apps:
app = cache.get_app(app_label)
models = [m for m in cache.get_models(app) if issubclass(m, ImageModel)]
for model in models:
for obj in model.objects.all():
for spec in model._ik.specs:
prop = getattr(obj, spec.name(), None)
if prop is not None:
prop._delete()
if spec.pre_cache:
prop._create()

View File

@@ -1,136 +0,0 @@
import os
from datetime import datetime
from django.conf import settings
from django.core.files.base import ContentFile
from django.db import models
from django.db.models.base import ModelBase
from django.utils.translation import ugettext_lazy as _
from imagekit import specs
from imagekit.lib import *
from imagekit.options import Options
from imagekit.utils import img_to_fobj
# Modify image file buffer size.
ImageFile.MAXBLOCK = getattr(settings, 'PIL_IMAGEFILE_MAXBLOCK', 256 * 2 ** 10)
# Choice tuples for specifying the crop origin.
# These are provided for convenience.
CROP_HORZ_CHOICES = (
(0, _('left')),
(1, _('center')),
(2, _('right')),
)
CROP_VERT_CHOICES = (
(0, _('top')),
(1, _('center')),
(2, _('bottom')),
)
class ImageModelBase(ModelBase):
""" ImageModel metaclass
This metaclass parses IKOptions and loads the specified specification
module.
"""
def __init__(cls, name, bases, attrs):
parents = [b for b in bases if isinstance(b, ImageModelBase)]
if not parents:
return
user_opts = getattr(cls, 'IKOptions', None)
opts = Options(user_opts)
try:
module = __import__(opts.spec_module, {}, {}, [''])
except ImportError:
raise ImportError('Unable to load imagekit config module: %s' % \
opts.spec_module)
for spec in [spec for spec in module.__dict__.values() \
if isinstance(spec, type) \
and issubclass(spec, specs.ImageSpec) \
and spec != specs.ImageSpec]:
setattr(cls, spec.name(), specs.Descriptor(spec))
opts.specs.append(spec)
setattr(cls, '_ik', opts)
class ImageModel(models.Model):
""" Abstract base class implementing all core ImageKit functionality
Subclasses of ImageModel are augmented with accessors for each defined
image specification and can override the inner IKOptions class to customize
storage locations and other options.
"""
__metaclass__ = ImageModelBase
class Meta:
abstract = True
class IKOptions:
pass
def admin_thumbnail_view(self):
if not self._imgfield:
return None
prop = getattr(self, self._ik.admin_thumbnail_spec, None)
if prop is None:
return 'An "%s" image spec has not been defined.' % \
self._ik.admin_thumbnail_spec
else:
if hasattr(self, 'get_absolute_url'):
return u'<a href="%s"><img src="%s"></a>' % \
(self.get_absolute_url(), prop.url)
else:
return u'<a href="%s"><img src="%s"></a>' % \
(self._imgfield.url, prop.url)
admin_thumbnail_view.short_description = _('Thumbnail')
admin_thumbnail_view.allow_tags = True
@property
def _imgfield(self):
return getattr(self, self._ik.image_field)
def _clear_cache(self):
for spec in self._ik.specs:
prop = getattr(self, spec.name())
prop._delete()
def _pre_cache(self):
for spec in self._ik.specs:
if spec.pre_cache:
prop = getattr(self, spec.name())
prop._create()
def save(self, clear_cache=True, *args, **kwargs):
is_new_object = self._get_pk_val is None
super(ImageModel, self).save(*args, **kwargs)
if is_new_object:
clear_cache = False
spec = self._ik.preprocessor_spec
if spec is not None:
newfile = self._imgfield.storage.open(str(self._imgfield))
img = Image.open(newfile)
img = spec.process(img, None)
format = img.format or 'JPEG'
if format != 'JPEG':
imgfile = img_to_fobj(img, format)
else:
imgfile = img_to_fobj(img, format,
quality=int(spec.quality),
optimize=True)
content = ContentFile(imgfile.read())
newfile.close()
name = str(self._imgfield)
self._imgfield.storage.delete(name)
self._imgfield.storage.save(name, content)
if clear_cache and self._imgfield != '':
self._clear_cache()
self._pre_cache()
def delete(self):
assert self._get_pk_val() is not None, "%s object can't be deleted because its %s attribute is set to None." % (self._meta.object_name, self._meta.pk.attname)
self._clear_cache()
models.Model.delete(self)

View File

@@ -1,23 +0,0 @@
# Imagekit options
from imagekit import processors
from imagekit.specs import ImageSpec
class Options(object):
""" Class handling per-model imagekit options
"""
image_field = 'image'
crop_horz_field = 'crop_horz'
crop_vert_field = 'crop_vert'
preprocessor_spec = None
cache_dir = 'cache'
save_count_as = None
cache_filename_format = "%(filename)s_%(specname)s.%(extension)s"
admin_thumbnail_spec = 'admin_thumbnail'
spec_module = 'imagekit.defaults'
def __init__(self, opts):
for key, value in opts.__dict__.iteritems():
setattr(self, key, value)
self.specs = []

View File

@@ -1,134 +0,0 @@
""" Imagekit Image "ImageProcessors"
A processor defines a set of class variables (optional) and a
class method named "process" which processes the supplied image using
the class properties as settings. The process method can be overridden as well allowing user to define their
own effects/processes entirely.
"""
from imagekit.lib import *
class ImageProcessor(object):
""" Base image processor class """
@classmethod
def process(cls, image, obj=None):
return image
class Adjustment(ImageProcessor):
color = 1.0
brightness = 1.0
contrast = 1.0
sharpness = 1.0
@classmethod
def process(cls, image, obj=None):
for name in ['Color', 'Brightness', 'Contrast', 'Sharpness']:
factor = getattr(cls, name.lower())
if factor != 1.0:
image = getattr(ImageEnhance, name)(image).enhance(factor)
return image
class Reflection(ImageProcessor):
background_color = '#FFFFFF'
size = 0.0
opacity = 0.6
@classmethod
def process(cls, image, obj=None):
# convert bgcolor string to rgb value
background_color = ImageColor.getrgb(cls.background_color)
# copy orignial image and flip the orientation
reflection = image.copy().transpose(Image.FLIP_TOP_BOTTOM)
# create a new image filled with the bgcolor the same size
background = Image.new("RGB", image.size, background_color)
# calculate our alpha mask
start = int(255 - (255 * cls.opacity)) # The start of our gradient
steps = int(255 * cls.size) # the number of intermedite values
increment = (255 - start) / float(steps)
mask = Image.new('L', (1, 255))
for y in range(255):
if y < steps:
val = int(y * increment + start)
else:
val = 255
mask.putpixel((0, y), val)
alpha_mask = mask.resize(image.size)
# merge the reflection onto our background color using the alpha mask
reflection = Image.composite(background, reflection, alpha_mask)
# crop the reflection
reflection_height = int(image.size[1] * cls.size)
reflection = reflection.crop((0, 0, image.size[0], reflection_height))
# create new image sized to hold both the original image and the reflection
composite = Image.new("RGB", (image.size[0], image.size[1]+reflection_height), background_color)
# paste the orignal image and the reflection into the composite image
composite.paste(image, (0, 0))
composite.paste(reflection, (0, image.size[1]))
# return the image complete with reflection effect
return composite
class Resize(ImageProcessor):
width = None
height = None
crop = False
upscale = False
@classmethod
def process(cls, image, obj=None):
cur_width, cur_height = image.size
if cls.crop:
crop_horz = getattr(obj, obj._ik.crop_horz_field, 1)
crop_vert = getattr(obj, obj._ik.crop_vert_field, 1)
ratio = max(float(cls.width)/cur_width, float(cls.height)/cur_height)
resize_x, resize_y = ((cur_width * ratio), (cur_height * ratio))
crop_x, crop_y = (abs(cls.width - resize_x), abs(cls.height - resize_y))
x_diff, y_diff = (int(crop_x / 2), int(crop_y / 2))
box_left, box_right = {
0: (0, cls.width),
1: (int(x_diff), int(x_diff + cls.width)),
2: (int(crop_x), int(resize_x)),
}[crop_horz]
box_upper, box_lower = {
0: (0, cls.height),
1: (int(y_diff), int(y_diff + cls.height)),
2: (int(crop_y), int(resize_y)),
}[crop_vert]
box = (box_left, box_upper, box_right, box_lower)
image = image.resize((int(resize_x), int(resize_y)), Image.ANTIALIAS).crop(box)
else:
if not cls.width is None and not cls.height is None:
ratio = min(float(cls.width)/cur_width,
float(cls.height)/cur_height)
else:
if cls.width is None:
ratio = float(cls.height)/cur_height
else:
ratio = float(cls.width)/cur_width
new_dimensions = (int(round(cur_width*ratio)),
int(round(cur_height*ratio)))
if new_dimensions[0] > cur_width or \
new_dimensions[1] > cur_height:
if not cls.upscale:
return image
image = image.resize(new_dimensions, Image.ANTIALIAS)
return image
class Transpose(ImageProcessor):
""" Rotates or flips the image
Method should be one of the following strings:
- FLIP_LEFT_RIGHT
- FLIP_TOP_BOTTOM
- ROTATE_90
- ROTATE_270
- ROTATE_180
"""
method = 'FLIP_LEFT_RIGHT'
@classmethod
def process(cls, image, obj=None):
return image.transpose(getattr(Image, cls.method))
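
As the docstring at the top of this file says, a processor is just class-level settings plus an overridable process() classmethod. A minimal sketch of a user-defined processor and a configured Resize, assuming only what is defined above; the Grayscale and SmallThumbnail names are invented.

from imagekit import processors

class Grayscale(processors.ImageProcessor):
    @classmethod
    def process(cls, image, obj=None):
        # PIL conversion to 8-bit greyscale; obj is the model instance and is unused here
        return image.convert('L')

class SmallThumbnail(processors.Resize):
    width = 80
    height = 80
    crop = True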

View File

@@ -1,119 +0,0 @@
""" ImageKit image specifications
All imagekit specifications must inherit from the ImageSpec class. Models
inheriting from ImageModel will be modified with a descriptor/accessor for each
spec found.
"""
import os
from StringIO import StringIO
from imagekit.lib import *
from imagekit.utils import img_to_fobj
from django.core.files.base import ContentFile
class ImageSpec(object):
pre_cache = False
quality = 70
increment_count = False
processors = []
@classmethod
def name(cls):
return getattr(cls, 'access_as', cls.__name__.lower())
@classmethod
def process(cls, image, obj):
processed_image = image.copy()
for proc in cls.processors:
processed_image = proc.process(processed_image, obj)
return processed_image
class Accessor(object):
def __init__(self, obj, spec):
self._img = None
self._obj = obj
self.spec = spec
def _get_imgfile(self):
format = self._img.format or 'JPEG'
if format != 'JPEG':
imgfile = img_to_fobj(self._img, format)
else:
imgfile = img_to_fobj(self._img, format,
quality=int(self.spec.quality),
optimize=True)
return imgfile
def _create(self):
if self._exists():
return
# process the original image file
fp = self._obj._imgfield.storage.open(self._obj._imgfield.name)
fp.seek(0)
fp = StringIO(fp.read())
try:
self._img = self.spec.process(Image.open(fp), self._obj)
# save the new image to the cache
content = ContentFile(self._get_imgfile().read())
self._obj._imgfield.storage.save(self.name, content)
except IOError:
pass
def _delete(self):
self._obj._imgfield.storage.delete(self.name)
def _exists(self):
return self._obj._imgfield.storage.exists(self.name)
def _basename(self):
filename, extension = \
os.path.splitext(os.path.basename(self._obj._imgfield.name))
return self._obj._ik.cache_filename_format % \
{'filename': filename,
'specname': self.spec.name(),
'extension': extension.lstrip('.')}
@property
def name(self):
return os.path.join(self._obj._ik.cache_dir, self._basename())
@property
def url(self):
self._create()
if self.spec.increment_count:
fieldname = self._obj._ik.save_count_as
if fieldname is not None:
current_count = getattr(self._obj, fieldname)
setattr(self._obj, fieldname, current_count + 1)
self._obj.save(clear_cache=False)
return self._obj._imgfield.storage.url(self.name)
@property
def file(self):
self._create()
return self._obj._imgfield.storage.open(self.name)
@property
def image(self):
if self._img is None:
self._create()
if self._img is None:
self._img = Image.open(self.file)
return self._img
@property
def width(self):
return self.image.size[0]
@property
def height(self):
return self.image.size[1]
class Descriptor(object):
def __init__(self, spec):
self._spec = spec
def __get__(self, obj, type=None):
return Accessor(obj, self._spec)
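
A minimal sketch (not part of the diff) of a spec module in the style this docstring describes, reusing the Resize processor from the file above. With spec_module pointing at such a module, each ImageModel instance gains one accessor per spec, e.g. photo.thumbnail.url, photo.thumbnail.width or photo.thumbnail.image. The ResizeThumb and Thumbnail names are invented.

from imagekit.specs import ImageSpec
from imagekit import processors

class ResizeThumb(processors.Resize):
    width = 100
    height = 75
    crop = True

class Thumbnail(ImageSpec):
    access_as = 'thumbnail'    # name of the accessor added to the model
    pre_cache = True           # build the cached file as soon as the original is saved
    increment_count = True     # bump the model's save_count_as field each time .url is served
    processors = [ResizeThumb]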

View File

@@ -1,86 +0,0 @@
import os
import tempfile
import unittest
from django.conf import settings
from django.core.files.base import ContentFile
from django.db import models
from django.test import TestCase
from imagekit import processors
from imagekit.models import ImageModel
from imagekit.specs import ImageSpec
from imagekit.lib import Image
class ResizeToWidth(processors.Resize):
width = 100
class ResizeToHeight(processors.Resize):
height = 100
class ResizeToFit(processors.Resize):
width = 100
height = 100
class ResizeCropped(ResizeToFit):
crop = ('center', 'center')
class TestResizeToWidth(ImageSpec):
access_as = 'to_width'
processors = [ResizeToWidth]
class TestResizeToHeight(ImageSpec):
access_as = 'to_height'
processors = [ResizeToHeight]
class TestResizeCropped(ImageSpec):
access_as = 'cropped'
processors = [ResizeCropped]
class TestPhoto(ImageModel):
""" Minimal ImageModel class for testing """
image = models.ImageField(upload_to='images')
class IKOptions:
spec_module = 'imagekit.tests'
class IKTest(TestCase):
""" Base TestCase class """
def setUp(self):
# create a test image using tempfile and PIL
self.tmp = tempfile.TemporaryFile()
Image.new('RGB', (800, 600)).save(self.tmp, 'JPEG')
self.tmp.seek(0)
self.p = TestPhoto()
self.p.image.save(os.path.basename('test.jpg'),
ContentFile(self.tmp.read()))
self.p.save()
# destroy temp file
self.tmp.close()
def test_setup(self):
self.assertEqual(self.p.image.width, 800)
self.assertEqual(self.p.image.height, 600)
def test_to_width(self):
self.assertEqual(self.p.to_width.width, 100)
self.assertEqual(self.p.to_width.height, 75)
def test_to_height(self):
self.assertEqual(self.p.to_height.width, 133)
self.assertEqual(self.p.to_height.height, 100)
def test_crop(self):
self.assertEqual(self.p.cropped.width, 100)
self.assertEqual(self.p.cropped.height, 100)
def test_url(self):
tup = (settings.MEDIA_URL, self.p._ik.cache_dir, 'test_to_width.jpg')
self.assertEqual(self.p.to_width.url, "%s%s/%s" % tup)
def tearDown(self):
# make sure image file is deleted
path = self.p.image.path
self.p.delete()
self.failIf(os.path.isfile(path))

View File

@@ -1,15 +0,0 @@
""" ImageKit utility functions """
import tempfile
def img_to_fobj(img, format, **kwargs):
tmp = tempfile.TemporaryFile()
if format != 'JPEG':
try:
img.save(tmp, format, **kwargs)
return
except KeyError:
pass
img.save(tmp, format, **kwargs)
tmp.seek(0)
return tmp

View File

@@ -1,8 +1,8 @@
import sys
-# link localsettings to this file for use on expo computer in austria
+# This is the local settings for use with the docker compose dev setup. It is imported automatically
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql', # 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.
'NAME' : 'troggle', # Or path to database file if using sqlite3.
'USER' : 'troggleuser', # Not used with sqlite3.
@@ -12,6 +12,8 @@ DATABASES = {
}
}
+ALLOWED_HOSTS = ['*']
EXPOUSER = 'expo'
EXPOUSERPASS = 'somepasshere'
EXPOUSER_EMAIL = 'wookey@wookware.org'
@@ -47,19 +49,12 @@ MEDIA_URL = URL_ROOT + DIR_ROOT + 'site_media/'
MEDIA_ROOT = REPOS_ROOT_PATH + '/troggle/media/'
MEDIA_ADMIN_DIR = '/usr/lib/python2.7/site-packages/django/contrib/admin/media/'
-STATIC_URL = URL_ROOT
+STATIC_URL = "/static/"
-STATIC_ROOT = DIR_ROOT
+STATIC_ROOT = "/expo/static"
JSLIB_URL = URL_ROOT + 'javascript/'
-TINY_MCE_MEDIA_ROOT = '/usr/share/tinymce/www/'
+TINY_MCE_MEDIA_ROOT = STATIC_ROOT + '/tiny_mce/'
-TINY_MCE_MEDIA_URL = URL_ROOT + DIR_ROOT + '/tinymce_media/'
+TINY_MCE_MEDIA_URL = STATIC_ROOT + '/tiny_mce/'
-TEMPLATE_DIRS = (
-PYTHON_PATH + "templates",
-# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
-# Always use forward slashes, even on Windows.
-# Don't forget to use absolute paths, not relative paths.
-)
LOGFILE = PYTHON_PATH + 'troggle_log.txt'

View File

@@ -15,6 +15,8 @@ DATABASES = {
}
}
+ALLOWED_HOSTS = ['*']
REPOS_ROOT_PATH = '/home/expo/'
sys.path.append(REPOS_ROOT_PATH)
sys.path.append(REPOS_ROOT_PATH + 'troggle')
@@ -53,13 +55,6 @@ JSLIB_PATH = '/usr/share/javascript/'
TINY_MCE_MEDIA_ROOT = '/usr/share/tinymce/www/'
TINY_MCE_MEDIA_URL = URL_ROOT + DIR_ROOT + 'tinymce_media/'
-TEMPLATE_DIRS = (
-PYTHON_PATH + "templates",
-# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
-# Always use forward slashes, even on Windows.
-# Don't forget to use absolute paths, not relative paths.
-)
LOGFILE = '/home/expo/troggle/troggle_log.txt'
FEINCMS_ADMIN_MEDIA='/site_media/feincms/'

View File

@@ -1,6 +1,6 @@
import sys
# This is an example file. Copy it to localsettings.py, set the
# password and _don't_ check that file back to the repo as it exposes
# your/our password to the world!
DATABASES = {
@@ -14,6 +14,8 @@ DATABASES = {
}
}
+ALLOWED_HOSTS = ['*']
EXPOUSER = 'expo'
EXPOUSERPASS = 'realpasshere'
EXPOUSER_EMAIL = 'wookey@wookware.org'
@@ -52,15 +54,8 @@ MEDIA_ADMIN_DIR = '/usr/lib/python2.7/site-packages/django/contrib/admin/media/'
JSLIB_URL = URL_ROOT + 'javascript/'
-TINY_MCE_MEDIA_ROOT = '/usr/share/tinymce/www/'
+TINY_MCE_MEDIA_ROOT = STATIC_ROOT + '/tiny_mce/'
-TINY_MCE_MEDIA_URL = URL_ROOT + DIR_ROOT + 'tinymce_media/'
+TINY_MCE_MEDIA_URL = STATIC_ROOT + '/tiny_mce/'
-TEMPLATE_DIRS = (
-PYTHON_PATH + "templates",
-# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
-# Always use forward slashes, even on Windows.
-# Don't forget to use absolute paths, not relative paths.
-)
LOGFILE = '/home/expo/troggle/troggle_log.txt'

View File

@@ -2,7 +2,7 @@ import sys
# link localsettings to this file for use on expo computer in austria
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql', # 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.
'NAME' : 'troggle', # Or path to database file if using sqlite3.
'USER' : 'expo', # Not used with sqlite3.
@@ -12,6 +12,8 @@ DATABASES = {
}
}
+ALLOWED_HOSTS = ['*']
EXPOUSER = 'expo'
EXPOUSERPASS = 'realpasshere'
EXPOUSER_EMAIL = 'wookey@wookware.org'
@@ -57,11 +59,4 @@ JSLIB_URL = URL_ROOT + 'javascript/'
TINY_MCE_MEDIA_ROOT = '/usr/share/tinymce/www/'
TINY_MCE_MEDIA_URL = URL_ROOT + DIR_ROOT + '/tinymce_media/'
-TEMPLATE_DIRS = (
-PYTHON_PATH + "templates",
-# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
-# Always use forward slashes, even on Windows.
-# Don't forget to use absolute paths, not relative paths.
-)
LOGFILE = PYTHON_PATH + 'troggle_log.txt'

View File

@@ -9,6 +9,8 @@ DATABASES = {
}
}
+ALLOWED_HOSTS = ['*']
EXPOUSER = 'expo'
EXPOUSERPASS = 'realpasshere'
EXPOUSER_EMAIL = 'wookey@wookware.org'
@@ -30,7 +32,7 @@ URL_ROOT = 'http://127.0.0.1:8000'
DIR_ROOT = ''#this should end in / if a value is given
PUBLIC_SITE = False
TINY_MCE_MEDIA_ROOT = '/usr/share/tinymce/www/'
TINY_MCE_MEDIA_URL = URL_ROOT + DIR_ROOT + 'tinymce_media/'
PYTHON_PATH = 'C:\\expoweb\\troggle\\'
@@ -56,14 +58,3 @@ EMAIL_USE_TLS = True
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash if there is a path component (optional in other cases).
# Examples: "http://media.lawrence.com", "http://example.com/media/"
-TEMPLATE_DIRS = (
-"C:/Expo/expoweb/troggle/templates",
-# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
-# Always use forward slashes, even on Windows.
-# Don't forget to use absolute paths, not relative paths.
-)

View File

@@ -1,52 +0,0 @@
body {
all: initial;
font-size: 100%;
}
div#inputf {
display: inline-block;
width: 300px;
text-align: justify;
margin-top: 0px;
margin-bottom: 5px
}
.menu, ul#links{
display: none;
}
table {
border-spacing: 0;
width: 100%;
border: 1px solid #ddd;
font-family: monospace;
}
th {
cursor: pointer;
background-color: #bbb
}
th, td {
padding: 16px;
max-height: 40px;
}
tr:nth-child(even) {
background-color: #f2f2f2
}
p {
margin-right: 80px;
margin-left: 80px;
}
button {
width: 300px
}
span#mono {
font-family: monospace;
background-color: #eee;
font-size: 120%;
}

View File

@@ -29,12 +29,12 @@
}
function redirectSurvey(){
-window.location = "{{ settings.URL_ROOT }}/survey/" + document.getElementById("expeditionChooser").value + "%23" + document.getElementById("surveyChooser").value;
+window.location = "{{ URL_ROOT }}/survey/" + document.getElementById("expeditionChooser").value + "%23" + document.getElementById("surveyChooser").value;
document.getElementById("progressTableContent").style.display='hidden'
}
function redirectYear(){
-window.location = "{{ settings.URL_ROOT }}/survey/" + document.getElementById("expeditionChooser").value + "%23"
+window.location = "{{ URL_ROOT }}/survey/" + document.getElementById("expeditionChooser").value + "%23"
}

View File

@@ -1,159 +0,0 @@
function filterTable(tablename)
{
table = document.getElementById(tablename);
mindepth = document.getElementById("CaveDepthMin").value;
maxdepth = document.getElementById("CaveDepthMax").value;
if(mindepth==0)mindepth=-999999;
if(maxdepth==0)maxdepth= 999999;
minlength = document.getElementById("CaveLengthMin").value;
maxlength = document.getElementById("CaveLengthMax").value;
if(minlength==0)minlength=-999999;
if(maxlength==0)maxlength= 999999;
visitdate = document.getElementById("VisitDate").value;
visitor = document.getElementById("Visitor").value;
cavename = document.getElementById("CaveName").value.toLowerCase();
incomplete = document.getElementById("Incomplete").checked;
var regexmode = false;
if(visitor[0]=='/' && visitor[visitor.length-1]=='/')
{
regexmode = true;
visitor = new RegExp(visitor.substr(1,visitor.length-2));
}
else
{
visitor.toLowerCase();
}
rows = table.rows;
for(i=1; i< rows.length; i++)
{
name = (rows[i].getElementsByTagName("TD")[1]).innerHTML.toLowerCase();
depth = (rows[i].getElementsByTagName("TD")[2]).innerHTML.toLowerCase();
depth = Number(depth.replace(/[^0-9.]/g,''));
length = (rows[i].getElementsByTagName("TD")[3]).innerHTML.toLowerCase();
length = Number(length.replace(/[^0-9.]/g,''));
date = (rows[i].getElementsByTagName("TD")[4]).innerHTML.toLowerCase();
//recentvisitor = (rows[i].getElementsByTagName("TD")[4]).innerHTML.toLowerCase();
recentvisitor = ""
if(cavename != "" && !name.includes(cavename))
{
rows[i].style.visibility = "collapse";
}
if(depth<mindepth || depth>maxdepth)
{
rows[i].style.visibility = "collapse";
}
if(length<minlength || length>maxlength)
{
rows[i].style.visibility = "collapse";
}
if(date < visitdate)
{
rows[i].style.visibility = "collapse";
}
if(visitor != "" && regexmode && !visitor.test(recentvisitor))
{
rows[i].style.visibility = "collapse";
}
if(visitor != "" && !regexmode && !recentvisitor.includes(visitor))
{
rows[i].style.visibility = "collapse";
}
crow=rows[i].getElementsByTagName("TD");
for(var j=0; j<crow.length; j++)
{
if(crow[j].innerHTML == "" && incomplete)
{
rows[i].style.visibility = "collapse";
break;
}
}
}
}
function filterTableReset(tablename)
{
table = document.getElementById(tablename);
rows = table.rows;
for(i=1; i< rows.length; i++)
{
rows[i].style.visibility = "visible";
}
}
function isOrdered(kvarray,numeric)
{
for(var i=0;i<kvarray.length-1;i++)
{
if(numeric==1 && Number(kvarray[i][0])>Number(kvarray[i+1][0]))
{
return false;
}
if(numeric!=1 && kvarray[i][0]>kvarray[i+1][0])
{
return false;
}
}
return true;
}
function sortTable(n, tablename, numeric) {
table = document.getElementById(tablename);
rows = table.rows;
var ordering = [];
var i;
//construct key-value pairs for sorting
for(i = 1; i < rows.length; i++) //remember header rows
{
key = rows[i].getElementsByTagName("TD")[n];
key = key.innerHTML.toLowerCase();
if(numeric==1)
{
key=key.replace(/[^0-9.]/g,'')
}
ordering.push([key,i]);
}
var ascending = isOrdered(ordering,numeric);
//sort either numerically or alphabetically
if(numeric==1)
{
ordering.sort((x,y) => Number(x[0])-Number(y[0]));
}
else
{
ordering.sort(); //sorts alphabetically
}
if(ascending) ordering.reverse();
for(i = 0; i < ordering.length; i++) //add sorted list at the end of the table
{
var keyval = ordering[i];
id = keyval[1]; //get rownumber of n^th sorted value
cln = rows[id].cloneNode(true); //deep clone of current node
table.insertBefore(cln,null); //add n^th row at the end
}
for(i = 1; i < ordering.length+1; i++) //remove unsorted nodes
{
table.deleteRow(1);// 0 -> header; 1 -> first row
}
}

View File

@@ -46,4 +46,4 @@ def _resolves(url):
return True
except http.Http404:
return False

View File

@@ -17,20 +17,20 @@ def parseCaveQMs(cave,inputFile):
try:
steinBr=Cave.objects.get(official_name="Steinbr&uuml;ckenh&ouml;hle")
except Cave.DoesNotExist:
-print "Steinbruckenhoehle is not in the database. Please run parsers.cavetab first."
+print("Steinbruckenhoehle is not in the database. Please run parsers.cavetab first.")
return
elif cave=='hauch':
try:
hauchHl=Cave.objects.get(official_name="Hauchh&ouml;hle")
except Cave.DoesNotExist:
-print "Hauchhoele is not in the database. Please run parsers.cavetab first."
+print("Hauchhoele is not in the database. Please run parsers.cavetab first.")
return
elif cave =='kh':
try:
kh=Cave.objects.get(official_name="Kaninchenh&ouml;hle")
except Cave.DoesNotExist:
-print "KH is not in the database. Please run parsers.cavetab first."
+print("KH is not in the database. Please run parsers.cavetab first.")
parse_KH_QMs(kh, inputFile=inputFile)
return
qmPath = settings.EXPOWEB+inputFile
@@ -46,9 +46,9 @@ def parseCaveQMs(cave,inputFile):
if cave=='stein':
placeholder, hadToCreate = LogbookEntry.objects.get_or_create(date__year=year, title="placeholder for QMs in 204", text="QMs temporarily attached to this should be re-attached to their actual trips", defaults={"date": date(year, 1, 1),"cave":steinBr})
elif cave=='hauch':
placeholder, hadToCreate = LogbookEntry.objects.get_or_create(date__year=year, title="placeholder for QMs in 234", text="QMs temporarily attached to this should be re-attached to their actual trips", defaults={"date": date(year, 1, 1),"cave":hauchHl})
if hadToCreate:
-print cave+" placeholder logbook entry for " + str(year) + " added to database"
+print(cave + " placeholder logbook entry for " + str(year) + " added to database")
QMnum=re.match(r".*?-\d*?-X?(?P<numb>\d*)",line[0]).group("numb")
newQM = QM()
newQM.found_by=placeholder
@@ -59,7 +59,7 @@ def parseCaveQMs(cave,inputFile):
newQM.grade=line[1]
newQM.area=line[2]
newQM.location_description=line[3]
newQM.completion_description=line[4]
newQM.nearest_station_description=line[5]
if newQM.completion_description: # Troggle checks if QMs are completed by checking if they have a ticked_off_by trip. In the table, completion is indicated by the presence of a completion discription.
@@ -71,19 +71,18 @@ def parseCaveQMs(cave,inputFile):
if preexistingQM.new_since_parsing==False: #if the pre-existing QM has not been modified, overwrite it
preexistingQM.delete()
newQM.save()
-print "overwriting " + str(preexistingQM) +"\r",
+print("overwriting " + str(preexistingQM) +"\r")
else: # otherwise, print that it was ignored
-print "preserving "+ str(preexistingQM) + ", which was edited in admin \r",
+print("preserving " + str(preexistingQM) + ", which was edited in admin \r")
except QM.DoesNotExist: #if there is no pre-existing QM, save the new one
newQM.save()
-print "QM "+str(newQM) + ' added to database\r',
+print("QM "+str(newQM) + ' added to database\r')
except KeyError: #check on this one
continue
except IndexError:
-print "Index error in " + str(line)
+print("Index error in " + str(line))
continue
def parse_KH_QMs(kh, inputFile):
@@ -104,14 +103,15 @@ def parse_KH_QMs(kh, inputFile):
}
nonLookupArgs={
'grade':res['grade'],
-'nearest_station':res['nearest_station'],
+'nearest_station_name':res['nearest_station'],
'location_description':res['description']
}
save_carefully(QM,lookupArgs,nonLookupArgs)
parseCaveQMs(cave='stein',inputFile=r"1623/204/qm.csv")
parseCaveQMs(cave='hauch',inputFile=r"1623/234/qm.csv")
parseCaveQMs(cave='kh', inputFile="1623/161/qmtodo.htm")
#parseCaveQMs(cave='balkonhoehle',inputFile=r"1623/264/qm.csv")

View File

@@ -6,16 +6,18 @@ import re
def readcaves():
-newArea = models.Area(short_name = "1623", parent = None)
-newArea.save()
-newArea = models.Area(short_name = "1626", parent = None)
-newArea.save()
-print("Reading Entrances")
+# Clear the cave data issues as we are reloading
+models.DataIssue.objects.filter(parser='caves').delete()
+area_1623 = models.Area.objects.update_or_create(short_name = "1623", parent = None)
+area_1626 = models.Area.objects.update_or_create(short_name = "1626", parent = None)
+print(" - Reading Entrances")
#print "list of <Slug> <Filename>"
for filename in os.walk(settings.ENTRANCEDESCRIPTIONS).next()[2]: #Should be a better way of getting a list of files
if filename.endswith('.html'):
readentrance(filename)
-print ("Reading Caves")
+print (" - Reading Caves")
for filename in os.walk(settings.CAVEDESCRIPTIONS).next()[2]: #Should be a better way of getting a list of files
if filename.endswith('.html'):
readcave(filename)
@@ -51,7 +53,7 @@ def readentrance(filename):
bearings = getXML(entrancecontents, "bearings", maxItems = 1, context = context)
url = getXML(entrancecontents, "url", maxItems = 1, context = context)
if len(non_public) == 1 and len(slugs) >= 1 and len(name) >= 1 and len(entrance_description) == 1 and len(explorers) == 1 and len(map_description) == 1 and len(location_description) == 1 and len(approach) == 1 and len(underground_description) == 1 and len(marking) == 1 and len(marking_comment) == 1 and len(findability) == 1 and len(findability_description) == 1 and len(alt) == 1 and len(northing) == 1 and len(easting) == 1 and len(tag_station) == 1 and len(exact_station) == 1 and len(other_station) == 1 and len(other_description) == 1 and len(bearings) == 1 and len(url) == 1:
-e = models.Entrance(name = name[0],
+e, state = models.Entrance.objects.update_or_create(name = name[0],
non_public = {"True": True, "False": False, "true": True, "false": False,}[non_public[0]],
entrance_description = entrance_description[0],
explorers = explorers[0],
@@ -75,14 +77,12 @@ def readentrance(filename):
url = url[0],
filename = filename,
cached_primary_slug = slugs[0])
-e.save()
primary = True
for slug in slugs:
#print slug, filename
-cs = models.EntranceSlug(entrance = e,
+cs = models.EntranceSlug.objects.update_or_create(entrance = e,
slug = slug,
primary = primary)
-cs.save()
primary = False
def readcave(filename):
@@ -117,7 +117,7 @@ def readcave(filename):
url = getXML(cavecontents, "url", maxItems = 1, context = context)
entrances = getXML(cavecontents, "entrance", context = context)
if len(non_public) == 1 and len(slugs) >= 1 and len(official_name) == 1 and len(areas) >= 1 and len(kataster_code) == 1 and len(kataster_number) == 1 and len(unofficial_number) == 1 and len(explorers) == 1 and len(underground_description) == 1 and len(equipment) == 1 and len(references) == 1 and len(survey) == 1 and len(kataster_status) == 1 and len(underground_centre_line) == 1 and len(notes) == 1 and len(length) == 1 and len(depth) == 1 and len(extent) == 1 and len(survex_file) == 1 and len(description_file ) == 1 and len(url) == 1 and len(entrances) >= 1:
-c = models.Cave(non_public = {"True": True, "False": False, "true": True, "false": False,}[non_public[0]],
+c, state = models.Cave.objects.update_or_create(non_public = {"True": True, "False": False, "true": True, "false": False,}[non_public[0]],
official_name = official_name[0],
kataster_code = kataster_code[0],
kataster_number = kataster_number[0],
@@ -137,7 +137,6 @@ def readcave(filename):
description_file = description_file[0],
url = url[0],
filename = filename)
-c.save()
for area_slug in areas:
area = models.Area.objects.filter(short_name = area_slug)
if area:
@@ -149,33 +148,40 @@ def readcave(filename):
primary = True
for slug in slugs:
try:
-cs = models.CaveSlug(cave = c,
+cs = models.CaveSlug.objects.update_or_create(cave = c,
slug = slug,
primary = primary)
-cs.save()
except:
-print("Can't find text (slug): %s, skipping %s" % (slug, context))
+message = "Can't find text (slug): %s, skipping %s" % (slug, context)
+models.DataIssue.objects.create(parser='caves', message=message)
+print(message)
primary = False
for entrance in entrances:
slug = getXML(entrance, "entranceslug", maxItems = 1, context = context)[0]
letter = getXML(entrance, "letter", maxItems = 1, context = context)[0]
try:
entrance = models.Entrance.objects.get(entranceslug__slug = slug)
-ce = models.CaveAndEntrance(cave = c, entrance_letter = letter, entrance = entrance)
+ce = models.CaveAndEntrance.objects.update_or_create(cave = c, entrance_letter = letter, entrance = entrance)
-ce.save()
except:
-print ("Entrance text (slug) %s missing %s" % (slug, context))
+message = "Entrance text (slug) %s missing %s" % (slug, context)
+models.DataIssue.objects.create(parser='caves', message=message)
+print(message)
def getXML(text, itemname, minItems = 1, maxItems = None, printwarnings = True, context = ""):
items = re.findall("<%(itemname)s>(.*?)</%(itemname)s>" % {"itemname": itemname}, text, re.S)
if len(items) < minItems and printwarnings:
-print("%(count)i %(itemname)s found, at least %(min)i expected" % {"count": len(items),
+message = "%(count)i %(itemname)s found, at least %(min)i expected" % {"count": len(items),
"itemname": itemname,
-"min": minItems} + context)
+"min": minItems} + context
+models.DataIssue.objects.create(parser='caves', message=message)
+print(message)
if maxItems is not None and len(items) > maxItems and printwarnings:
-print("%(count)i %(itemname)s found, no more than %(max)i expected" % {"count": len(items),
+message = "%(count)i %(itemname)s found, no more than %(max)i expected" % {"count": len(items),
"itemname": itemname,
-"max": maxItems} + context)
+"max": maxItems} + context
+models.DataIssue.objects.create(parser='caves', message=message)
+print(message)
return items

View File

@@ -1,213 +0,0 @@
import troggle.core.models as models #import models for various objects
from django.conf import settings
import xml.etree.ElementTree as ET #this is used to parse XML's
import subprocess
import re
#
# This parser has to find several things:
# There are files of .html format in expoweb area - they contain some of the important information
# There is a similar number of .svx files in loser are - they contain all the measurements
#
# Previous version was incredibly slow due to various shitty ideas about finding things
# and overelayance on python when handling regular expressions, new version delegates heavy lifting to shell
# and handles more sophisticated bits only
#
def load():
print('Hi! I\'m caves parser. Ready to work')
print('Loading caves of 1623 area')
loadarea('1623')
print('Loading caves of 1626 area')
loadarea('1626')
def loadarea(areacode):
if not file_exists(settings.SURVEX_DATA+'1623-and-1626.3d'):
print('Computing master .3d file')
bash('cavern -o'+settings.SURVEX_DATA+' '+settings.SURVEX_DATA+'1623-and-1626.svx')
else:
print('Loading from existing master .3d file')
master3d = bash('dump3d -d '+settings.SURVEX_DATA+'1623-and-1626.3d').splitlines()
master3dN = [x for x in master3d if ('NODE' in x)] #list of nodes of master survex file
master3dL = [x for x in master3d if ('LINE' in x)] #list of nodes of master survex file
print('Searching all cave dirs files')
basedir = settings.SURVEX_DATA+'caves-'+areacode+'/'
cavedirs = bash("find "+basedir+" -maxdepth 1 -type d").splitlines() #this command finds all directories
print('Obtained list of directories! (#dirs='+str(len(cavedirs))+')')
ndirs = len(cavedirs) #remember number of dirs for nice debug output
for cavedir in cavedirs:
if cavedir==basedir:
continue #skip the basedir - a non-proper subdirectory
cavename = bash('echo '+cavedir+' | rev | cut -f1 -d \'/\' | rev').splitlines()[0] #get final bit of the directory
test = bash('if [ ! -f '+cavedir+'/'+cavename+'.svx ] ; then echo MISSING; fi')#test for file exisence
if not file_exists(cavedir+'/'+cavename+'.svx'):
msg = models.Parser_messageM(parsername='caves',content=cavedir+'/'+cavename+' MISSING!',message_type='warn')
print('Cave missing'+cavename+' :(')
msg.save()
continue
fullname=cavedir+'/'+cavename+'.svx'
print('Found cave:'+cavename)
cavernout = bash('cavern -o '+cavedir+' '+fullname) #make cavern process the thing
if 'cavern: error:' in cavernout:
msg = models.Parser_messageM(parsername='caves',content=cavedir+'/'+cavename+' Survex file messed up!',message_type='warn')
print('Fucked svx'+cavename+' :(')
msg.save()
continue
cavernout = cavernout.splitlines()
depth = float(([x for x in cavernout if ('Total vertical length' in x)][0].split()[-1])[:-2])
length = float(([x for x in cavernout if ('Total length' in x)][0].split()[6])[:-1])
cavefile = open(fullname,'r')
cavefilecontents = cavefile.read().splitlines()
surveyname = [x for x in cavefilecontents if ('*begin ') in x][0].split()[1].lower()
try:
title = [x for x in cavefilecontents if ('*title ') in x][0].split()[1]
except:
syrveyname = "Untitled"
relevant_nodes = [x for x in master3dN if (('['+areacode+'.'+surveyname+'.' in x) or ('['+areacode+'.'+surveyname+']' in x))]
entrance_nodes = [x for x in relevant_nodes if 'ENTRANCE' in x]
surface_nodes = [x for x in relevant_nodes if 'SURFACE' in x]
location_nodes = []
print('rel_nodes'+str(len(relevant_nodes)))
if len(entrance_nodes) > 0:
location_nodes = entrance_nodes
elif len(surface_nodes) > 0:
location_nodes = surface_nodes
elif len(relevant_nodes) > 0:
location_nodes = relevant_nodes
try:
location = sorted(location_nodes, key = lambda y : float(y.split()[3])).pop()
except:
print(location_nodes)
location = 'Not found'
relevant_lines = [x for x in master3dL if (('['+areacode+'.'+surveyname+'.' in x) or ('['+areacode+'.'+surveyname+']' in x))]
try:
lastleg = sorted(relevant_lines, key = lambda y : y.split().pop()).pop()
except:
lastleg = ['LINE 1900.01.01']
try:
lastdate = lastleg.split().pop()
if 'STYLE' in lastdate:
lastdate = lastleg.split().pop().pop()
except:
lastdate = '1900.01.01'
entrance = ' '.join(location.split()[1:3])
print((('depth','length','surv name','entr','date'),(depth,length,surveyname,entrance,lastdate))) #sanity check print
newcave = models.CaveM(
survex_file = fullname,
total_length = length,
name=areacode+'.'+surveyname,
total_depth = depth,
date = lastdate,
entrance = entrance)
newcave.save()
#end of reading survex masterfiles
print ("Reading cave descriptions")
cavefiles = bash('find '+settings.CAVEDESCRIPTIONS+' -name \'*.html\'').splitlines()
for fn in cavefiles:
f = open(fn, "r")
print(fn)
contents = f.read()
slug = re.sub(r"\s+", "", extractXML(contents,'caveslug'))
desc = extractXML(contents,'underground_description')
name = slug[5:] #get survex compatible name
area = slug[0:4]
print([area,name])
if desc==None or name==None:
msg = models.Parser_messageM(parsername='caves',content=fn+' Description meesed up!',message_type='warn')
print('Fucked description '+fn+' :(')
msg.save()
continue
print(area+'/'+name+'/'+name+'.svx')
updatecave = models.CaveM.objects.filter(survex_file__icontains=area+'/'+name+'/'+name+'.svx')
if len(updatecave)>1:
print('Non unique solution - skipping. Name:'+name)
elif len(updatecave)==0:
print('Cave with no survex data:'+name)
continue
else: #exaclty one match
print('Adding desc:'+name)
updatecave = updatecave[0]
updatecave.description = '/cave/descriptionM/'+slug #area-name
updatecave.title=name
updatecave.save()
slugS = slug
explorersS = extractXML(contents,'explorers')
underground_descriptionS = extractXML(contents,'underground_description')
equipmentS = extractXML(contents,'equipment')
referencesS = extractXML(contents,'references')
surveyS = extractXML(contents,'survey')
kataster_statusS = extractXML(contents,'kataster_status')
underground_centre_lineS = extractXML(contents,'underground_centre_line')
survex_fileS = extractXML(contents,'survex_file')
notesS = extractXML(contents,'notes')
newcavedesc = models.Cave_descriptionM(
slug = slugS,
explorers = explorersS,
underground_description = underground_descriptionS,
equipment = equipmentS,
references = referencesS,
survey = surveyS,
kataster_status = kataster_statusS,
underground_centre_line = underground_centre_lineS,
survex_file = survex_fileS,
notes = notesS)
newcavedesc.save()
#end of reading cave descriptions
def file_exists(filename):
test = bash('if [ ! -f '+filename+' ] ; then echo MISSING; fi')#test for file exisence
if 'MISSING' in test: #send error message to the database
return False
return True
def extractXML(contents,tag):
#find correct lines
lines = contents.splitlines()
beg = [x for x in lines if ('<'+tag+'>' in x)]
end = [x for x in lines if ('</'+tag+'>' in x)]
if (not beg) or (not end):
return None
begi = lines.index(beg[0])
endi = lines.index(end[0])
if endi!=begi:
segment = '\n'.join(lines[begi:endi+1])
else:
segment = lines[begi:endi+1][0]
hit = re.findall('<'+tag+'>(.*)</'+tag+'>', segment, re.S)[0]
return hit
def bash(cmd): #calls command in bash shell, returns output
process = subprocess.Popen(cmd,shell=True,stdout=subprocess.PIPE)
output, error = process.communicate()
return output
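
For reference, a hedged sketch (not part of the file above) of what extractXML() returns for a minimal cave description; the HTML snippet is invented for illustration.

contents = """<cave>
<caveslug>1623-204</caveslug>
<underground_description>Large horizontal system.</underground_description>
</cave>"""
print(extractXML(contents, 'caveslug'))                  # -> 1623-204
print(extractXML(contents, 'underground_description'))   # -> Large horizontal system.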

View File

@@ -7,15 +7,18 @@ from parsers.people import GetPersonExpeditionNameLookup
from parsers.cavetab import GetCaveLookup from parsers.cavetab import GetCaveLookup
from django.template.defaultfilters import slugify from django.template.defaultfilters import slugify
from django.utils.timezone import get_current_timezone
from django.utils.timezone import make_aware
import csv import csv
import re import re
import datetime import datetime
import os import os
from fuzzywuzzy import fuzz
from utils import save_carefully from utils import save_carefully
# #
# When we edit logbook entries, allow a "?" after any piece of data to say we've frigged it and # When we edit logbook entries, allow a "?" after any piece of data to say we've frigged it and
# it can be checked up later from the hard-copy if necessary; or it's not possible to determin (name, trip place, etc) # it can be checked up later from the hard-copy if necessary; or it's not possible to determin (name, trip place, etc)
# #
@@ -23,19 +26,33 @@ from utils import save_carefully
# #
# the logbook loading section # the logbook loading section
# #
def GetTripPersons(trippeople, expedition, logtime_underground): def GetTripPersons(trippeople, expedition, logtime_underground):
res = [ ] res = [ ]
author = None author = None
for tripperson in re.split(",|\+|&amp;|&(?!\w+;)| and ", trippeople): round_bracket_regex = re.compile(r"[\(\[].*?[\)\]]")
for tripperson in re.split(r",|\+|&amp;|&(?!\w+;)| and ", trippeople):
tripperson = tripperson.strip() tripperson = tripperson.strip()
mul = re.match("<u>(.*?)</u>$(?i)", tripperson) tripperson = tripperson.strip('.')
mul = re.match(r"<u>(.*?)</u>$(?i)", tripperson)
if mul: if mul:
tripperson = mul.group(1).strip() tripperson = mul.group(1).strip()
if tripperson and tripperson[0] != '*': if tripperson and tripperson[0] != '*':
#assert tripperson in personyearmap, "'%s' << %s\n\n %s" % (tripperson, trippeople, personyearmap) #assert tripperson in personyearmap, "'%s' << %s\n\n %s" % (tripperson, trippeople, personyearmap)
tripperson = re.sub(round_bracket_regex, "", tripperson).strip()
personyear = GetPersonExpeditionNameLookup(expedition).get(tripperson.lower()) personyear = GetPersonExpeditionNameLookup(expedition).get(tripperson.lower())
if not personyear: if not personyear:
print "NoMatchFor: '%s'" % tripperson print(" - No name match for: '%s'" % tripperson)
message = "No name match for: '%s' in year '%s'" % (tripperson, expedition.year)
models.DataIssue.objects.create(parser='logbooks', message=message)
print(' - Lets try something fuzzy')
fuzzy_matches = {}
for person in GetPersonExpeditionNameLookup(expedition):
fuzz_num = fuzz.ratio(tripperson.lower(), person)
if fuzz_num > 50:
#print(" - %s -> %s = %d" % (tripperson.lower(), person, fuzz_num))
fuzzy_matches[person] = fuzz_num
for i in sorted(fuzzy_matches.items(), key = lambda kv:(kv[1]), reverse=True):
print(' - %s -> %s' % (i[0], i[1]))
res.append((personyear, logtime_underground)) res.append((personyear, logtime_underground))
if mul: if mul:
author = personyear author = personyear
@@ -45,7 +62,7 @@ def GetTripPersons(trippeople, expedition, logtime_underground):
author = res[-1][0] author = res[-1][0]
return res, author return res, author
def GetTripCave(place): #need to be fuzzier about matching here. Already a very slow function... def GetTripCave(place): #need to be fuzzier about matching here. Already a very slow function...
# print "Getting cave for " , place # print "Getting cave for " , place
try: try:
katastNumRes=[] katastNumRes=[]
@@ -65,34 +82,36 @@ def GetTripCave(place): #need to be fuzzier about matching h
return tripCaveRes return tripCaveRes
elif len(tripCaveRes)>1: elif len(tripCaveRes)>1:
print "Ambiguous place " + str(place) + " entered. Choose from " + str(tripCaveRes) print("Ambiguous place " + str(place) + " entered. Choose from " + str(tripCaveRes))
correctIndex=input("type list index of correct cave") correctIndex=input("type list index of correct cave")
return tripCaveRes[correctIndex] return tripCaveRes[correctIndex]
else: else:
print "No cave found for place " , place print("No cave found for place " , place)
return return
noncaveplaces = [ "Journey", "Loser Plateau" ] noncaveplaces = [ "Journey", "Loser Plateau" ]
def EnterLogIntoDbase(date, place, title, text, trippeople, expedition, logtime_underground): def EnterLogIntoDbase(date, place, title, text, trippeople, expedition, logtime_underground, entry_type="wiki"):
""" saves a logbook entry and related persontrips """ """ saves a logbook entry and related persontrips """
trippersons, author = GetTripPersons(trippeople, expedition, logtime_underground) trippersons, author = GetTripPersons(trippeople, expedition, logtime_underground)
if not author: if not author:
print "skipping logentry", title print(" - Skipping logentry: " + title + " - no author for entry")
message = "Skipping logentry: %s - no author for entry in year '%s'" % (title, expedition.year)
models.DataIssue.objects.create(parser='logbooks', message=message)
return return
# tripCave = GetTripCave(place) #tripCave = GetTripCave(place)
#
lplace = place.lower() lplace = place.lower()
if lplace not in noncaveplaces: if lplace not in noncaveplaces:
cave=GetCaveLookup().get(lplace) cave=GetCaveLookup().get(lplace)
#Check for an existing copy of the current entry, and save #Check for an existing copy of the current entry, and save
expeditionday = expedition.get_expedition_day(date) expeditionday = expedition.get_expedition_day(date)
lookupAttribs={'date':date, 'title':title} lookupAttribs={'date':date, 'title':title}
nonLookupAttribs={'place':place, 'text':text, 'expedition':expedition, 'cave':cave, 'slug':slugify(title)[:50]} nonLookupAttribs={'place':place, 'text':text, 'expedition':expedition, 'cave':cave, 'slug':slugify(title)[:50], 'entry_type':entry_type}
lbo, created=save_carefully(models.LogbookEntry, lookupAttribs, nonLookupAttribs) lbo, created=save_carefully(models.LogbookEntry, lookupAttribs, nonLookupAttribs)
for tripperson, time_underground in trippersons: for tripperson, time_underground in trippersons:
lookupAttribs={'personexpedition':tripperson, 'logbook_entry':lbo} lookupAttribs={'personexpedition':tripperson, 'logbook_entry':lbo}
nonLookupAttribs={'time_underground':time_underground, 'is_logbook_entry_author':(tripperson == author)} nonLookupAttribs={'time_underground':time_underground, 'is_logbook_entry_author':(tripperson == author)}
@@ -102,8 +121,8 @@ def EnterLogIntoDbase(date, place, title, text, trippeople, expedition, logtime_
def ParseDate(tripdate, year): def ParseDate(tripdate, year):
""" Interprets dates in the expo logbooks and returns a correct datetime.date object """ """ Interprets dates in the expo logbooks and returns a correct datetime.date object """
mdatestandard = re.match("(\d\d\d\d)-(\d\d)-(\d\d)", tripdate) mdatestandard = re.match(r"(\d\d\d\d)-(\d\d)-(\d\d)", tripdate)
mdategoof = re.match("(\d\d?)/0?(\d)/(20|19)?(\d\d)", tripdate) mdategoof = re.match(r"(\d\d?)/0?(\d)/(20|19)?(\d\d)", tripdate)
if mdatestandard: if mdatestandard:
assert mdatestandard.group(1) == year, (tripdate, year) assert mdatestandard.group(1) == year, (tripdate, year)
year, month, day = int(mdatestandard.group(1)), int(mdatestandard.group(2)), int(mdatestandard.group(3)) year, month, day = int(mdatestandard.group(1)), int(mdatestandard.group(2)), int(mdatestandard.group(3))
@@ -113,11 +132,11 @@ def ParseDate(tripdate, year):
day, month, year = int(mdategoof.group(1)), int(mdategoof.group(2)), int(mdategoof.group(4)) + yadd day, month, year = int(mdategoof.group(1)), int(mdategoof.group(2)), int(mdategoof.group(4)) + yadd
else: else:
assert False, tripdate assert False, tripdate
return datetime.date(year, month, day) return make_aware(datetime.datetime(year, month, day), get_current_timezone())
# 2007, 2008, 2006 # 2006, 2008 - 2010
def Parselogwikitxt(year, expedition, txt): def Parselogwikitxt(year, expedition, txt):
trippara = re.findall("===(.*?)===([\s\S]*?)(?====)", txt) trippara = re.findall(r"===(.*?)===([\s\S]*?)(?====)", txt)
for triphead, triptext in trippara: for triphead, triptext in trippara:
tripheadp = triphead.split("|") tripheadp = triphead.split("|")
#print "ttt", tripheadp #print "ttt", tripheadp
@@ -126,7 +145,7 @@ def Parselogwikitxt(year, expedition, txt):
tripsplace = tripplace.split(" - ") tripsplace = tripplace.split(" - ")
tripcave = tripsplace[0].strip() tripcave = tripsplace[0].strip()
tul = re.findall("T/?U:?\s*(\d+(?:\.\d*)?|unknown)\s*(hrs|hours)?", triptext) tul = re.findall(r"T/?U:?\s*(\d+(?:\.\d*)?|unknown)\s*(hrs|hours)?", triptext)
if tul: if tul:
#assert len(tul) <= 1, (triphead, triptext) #assert len(tul) <= 1, (triphead, triptext)
#assert tul[0][1] in ["hrs", "hours"], (triphead, triptext) #assert tul[0][1] in ["hrs", "hours"], (triphead, triptext)
@@ -140,12 +159,16 @@ def Parselogwikitxt(year, expedition, txt):
#print "\n", tripcave, "--- ppp", trippeople, len(triptext) #print "\n", tripcave, "--- ppp", trippeople, len(triptext)
EnterLogIntoDbase(date = ldate, place = tripcave, title = tripplace, text = triptext, trippeople=trippeople, expedition=expedition, logtime_underground=0) EnterLogIntoDbase(date = ldate, place = tripcave, title = tripplace, text = triptext, trippeople=trippeople, expedition=expedition, logtime_underground=0)
# 2002, 2004, 2005 # 2002, 2004, 2005, 2007, 2011 - 2018
def Parseloghtmltxt(year, expedition, txt): def Parseloghtmltxt(year, expedition, txt):
tripparas = re.findall("<hr\s*/>([\s\S]*?)(?=<hr)", txt) #print(" - Starting log html parser")
tripparas = re.findall(r"<hr\s*/>([\s\S]*?)(?=<hr)", txt)
logbook_entry_count = 0
for trippara in tripparas: for trippara in tripparas:
#print(" - HR detected - maybe a trip?")
s = re.match('''(?x)(?:\s*<div\sclass="tripdate"\sid=".*?">.*?</div>\s*<p>)? # second date logbook_entry_count += 1
s = re.match(r'''(?x)(?:\s*<div\sclass="tripdate"\sid=".*?">.*?</div>\s*<p>)? # second date
\s*(?:<a\s+id="(.*?)"\s*/>\s*</a>)? \s*(?:<a\s+id="(.*?)"\s*/>\s*</a>)?
\s*<div\s+class="tripdate"\s*(?:id="(.*?)")?>(.*?)</div>(?:<p>)? \s*<div\s+class="tripdate"\s*(?:id="(.*?)")?>(.*?)</div>(?:<p>)?
\s*<div\s+class="trippeople">\s*(.*?)</div> \s*<div\s+class="trippeople">\s*(.*?)</div>
@@ -155,46 +178,46 @@ def Parseloghtmltxt(year, expedition, txt):
\s*$ \s*$
''', trippara) ''', trippara)
if not s: if not s:
if not re.search("Rigging Guide", trippara): if not re.search(r"Rigging Guide", trippara):
print "can't parse: ", trippara # this is 2007 which needs editing print("can't parse: ", trippara) # this is 2007 which needs editing
#assert s, trippara #assert s, trippara
continue continue
tripid, tripid1, tripdate, trippeople, triptitle, triptext, tu = s.groups() tripid, tripid1, tripdate, trippeople, triptitle, triptext, tu = s.groups()
ldate = ParseDate(tripdate.strip(), year) ldate = ParseDate(tripdate.strip(), year)
#assert tripid[:-1] == "t" + tripdate, (tripid, tripdate)
trippeople = re.sub("Ol(?!l)", "Olly", trippeople)
trippeople = re.sub("Wook(?!e)", "Wookey", trippeople)
triptitles = triptitle.split(" - ") triptitles = triptitle.split(" - ")
if len(triptitles) >= 2: if len(triptitles) >= 2:
tripcave = triptitles[0] tripcave = triptitles[0]
else: else:
tripcave = "UNKNOWN" tripcave = "UNKNOWN"
#print "\n", tripcave, "--- ppp", trippeople, len(triptext) #print("\n", tripcave, "--- ppp", trippeople, len(triptext))
ltriptext = re.sub("</p>", "", triptext) ltriptext = re.sub(r"</p>", "", triptext)
ltriptext = re.sub("\s*?\n\s*", " ", ltriptext) ltriptext = re.sub(r"\s*?\n\s*", " ", ltriptext)
ltriptext = re.sub("<p>", "\n\n", ltriptext).strip() ltriptext = re.sub(r"<p>", "\n\n", ltriptext).strip()
EnterLogIntoDbase(date = ldate, place = tripcave, title = triptitle, text = ltriptext, trippeople=trippeople, expedition=expedition, logtime_underground=0) EnterLogIntoDbase(date = ldate, place = tripcave, title = triptitle, text = ltriptext,
trippeople=trippeople, expedition=expedition, logtime_underground=0,
entry_type="html")
if logbook_entry_count == 0:
print(" - No trip entrys found in logbook, check the syntax matches htmltxt format")
# main parser for pre-2001. simpler because the data has been hacked so much to fit it # main parser for 1991 - 2001. simpler because the data has been hacked so much to fit it
def Parseloghtml01(year, expedition, txt): def Parseloghtml01(year, expedition, txt):
tripparas = re.findall("<hr[\s/]*>([\s\S]*?)(?=<hr)", txt) tripparas = re.findall(r"<hr[\s/]*>([\s\S]*?)(?=<hr)", txt)
for trippara in tripparas: for trippara in tripparas:
s = re.match(u"(?s)\s*(?:<p>)?(.*?)</?p>(.*)$(?i)", trippara) s = re.match(u"(?s)\s*(?:<p>)?(.*?)</?p>(.*)$(?i)", trippara)
assert s, trippara[:300] assert s, trippara[:300]
tripheader, triptext = s.group(1), s.group(2) tripheader, triptext = s.group(1), s.group(2)
mtripid = re.search('<a id="(.*?)"', tripheader) mtripid = re.search(r'<a id="(.*?)"', tripheader)
tripid = mtripid and mtripid.group(1) or "" tripid = mtripid and mtripid.group(1) or ""
tripheader = re.sub("</?(?:[ab]|span)[^>]*>", "", tripheader) tripheader = re.sub(r"</?(?:[ab]|span)[^>]*>", "", tripheader)
#print " ", [tripheader] #print " ", [tripheader]
#continue #continue
tripdate, triptitle, trippeople = tripheader.split("|") tripdate, triptitle, trippeople = tripheader.split("|")
ldate = ParseDate(tripdate.strip(), year) ldate = ParseDate(tripdate.strip(), year)
mtu = re.search('<p[^>]*>(T/?U.*)', triptext) mtu = re.search(r'<p[^>]*>(T/?U.*)', triptext)
if mtu: if mtu:
tu = mtu.group(1) tu = mtu.group(1)
triptext = triptext[:mtu.start(0)] + triptext[mtu.end():] triptext = triptext[:mtu.start(0)] + triptext[mtu.end():]
@@ -205,39 +228,40 @@ def Parseloghtml01(year, expedition, txt):
tripcave = triptitles[0].strip() tripcave = triptitles[0].strip()
ltriptext = triptext ltriptext = triptext
mtail = re.search('(?:<a href="[^"]*">[^<]*</a>|\s|/|-|&amp;|</?p>|\((?:same day|\d+)\))*$', ltriptext) mtail = re.search(r'(?:<a href="[^"]*">[^<]*</a>|\s|/|-|&amp;|</?p>|\((?:same day|\d+)\))*$', ltriptext)
if mtail: if mtail:
#print mtail.group(0) #print mtail.group(0)
ltriptext = ltriptext[:mtail.start(0)] ltriptext = ltriptext[:mtail.start(0)]
ltriptext = re.sub("</p>", "", ltriptext) ltriptext = re.sub(r"</p>", "", ltriptext)
ltriptext = re.sub("\s*?\n\s*", " ", ltriptext) ltriptext = re.sub(r"\s*?\n\s*", " ", ltriptext)
ltriptext = re.sub("<p>|<br>", "\n\n", ltriptext).strip() ltriptext = re.sub(r"<p>|<br>", "\n\n", ltriptext).strip()
#ltriptext = re.sub("[^\s0-9a-zA-Z\-.,:;'!]", "NONASCII", ltriptext) #ltriptext = re.sub("[^\s0-9a-zA-Z\-.,:;'!]", "NONASCII", ltriptext)
ltriptext = re.sub("</?u>", "_", ltriptext) ltriptext = re.sub(r"</?u>", "_", ltriptext)
ltriptext = re.sub("</?i>", "''", ltriptext) ltriptext = re.sub(r"</?i>", "''", ltriptext)
ltriptext = re.sub("</?b>", "'''", ltriptext) ltriptext = re.sub(r"</?b>", "'''", ltriptext)
#print ldate, trippeople.strip() #print ldate, trippeople.strip()
# could includ the tripid (url link for cross referencing) # could includ the tripid (url link for cross referencing)
EnterLogIntoDbase(date=ldate, place=tripcave, title=triptitle, text=ltriptext, trippeople=trippeople, expedition=expedition, logtime_underground=0) EnterLogIntoDbase(date=ldate, place=tripcave, title=triptitle, text=ltriptext,
trippeople=trippeople, expedition=expedition, logtime_underground=0,
entry_type="html")
# parser for 2003
def Parseloghtml03(year, expedition, txt):
    tripparas = re.findall(r"<hr\s*/>([\s\S]*?)(?=<hr)", txt)
    for trippara in tripparas:
        s = re.match(u"(?s)\s*<p>(.*?)</p>(.*)$", trippara)
        assert s, trippara
        tripheader, triptext = s.group(1), s.group(2)
        tripheader = re.sub(r"&nbsp;", " ", tripheader)
        tripheader = re.sub(r"\s+", " ", tripheader).strip()
        sheader = tripheader.split(" -- ")
        tu = ""
        if re.match("T/U|Time underwater", sheader[-1]):
            tu = sheader.pop()
        if len(sheader) != 3:
            print("header not three pieces", sheader)
        tripdate, triptitle, trippeople = sheader
        ldate = ParseDate(tripdate.strip(), year)
        triptitles = triptitle.split(" , ")
@@ -246,37 +270,14 @@ def Parseloghtml03(year, expedition, txt):
        else:
            tripcave = "UNKNOWN"
        #print tripcave, "--- ppp", triptitle, trippeople, len(triptext)
        ltriptext = re.sub(r"</p>", "", triptext)
        ltriptext = re.sub(r"\s*?\n\s*", " ", ltriptext)
        ltriptext = re.sub(r"<p>", "\n\n", ltriptext).strip()
        ltriptext = re.sub(r"[^\s0-9a-zA-Z\-.,:;'!&()\[\]<>?=+*%]", "_NONASCII_", ltriptext)
        EnterLogIntoDbase(date = ldate, place = tripcave, title = triptitle,
                          text = ltriptext, trippeople=trippeople, expedition=expedition,
                          logtime_underground=0, entry_type="html")
yearlinks = [
# ("2013", "2013/logbook.html", Parseloghtmltxt),
("2012", "2012/logbook.html", Parseloghtmltxt),
("2011", "2011/logbook.html", Parseloghtmltxt),
("2010", "2010/logbook.html", Parselogwikitxt),
("2009", "2009/2009logbook.txt", Parselogwikitxt),
("2008", "2008/2008logbook.txt", Parselogwikitxt),
("2007", "2007/logbook.html", Parseloghtmltxt),
("2006", "2006/logbook/logbook_06.txt", Parselogwikitxt),
("2005", "2005/logbook.html", Parseloghtmltxt),
("2004", "2004/logbook.html", Parseloghtmltxt),
("2003", "2003/logbook.html", Parseloghtml03),
("2002", "2002/logbook.html", Parseloghtmltxt),
("2001", "2001/log.htm", Parseloghtml01),
("2000", "2000/log.htm", Parseloghtml01),
("1999", "1999/log.htm", Parseloghtml01),
("1998", "1998/log.htm", Parseloghtml01),
("1997", "1997/log.htm", Parseloghtml01),
("1996", "1996/log.htm", Parseloghtml01),
("1995", "1995/log.htm", Parseloghtml01),
("1994", "1994/log.htm", Parseloghtml01),
("1993", "1993/log.htm", Parseloghtml01),
("1992", "1992/log.htm", Parseloghtml01),
("1991", "1991/log.htm", Parseloghtml01),
]
def SetDatesFromLogbookEntries(expedition):
    """
@@ -295,54 +296,67 @@ def SetDatesFromLogbookEntries(expedition):
            persontrip.persontrip_next = None
            lprevpersontrip = persontrip
        persontrip.save()

def LoadLogbookForExpedition(expedition):
    """ Parses all logbook entries for one expedition """
    expowebbase = os.path.join(settings.EXPOWEB, "years")
    yearlinks = settings.LOGBOOK_PARSER_SETTINGS

    logbook_parseable = False

    if expedition.year in yearlinks:
        year_settings = yearlinks[expedition.year]
        file_in = open(os.path.join(expowebbase, year_settings[0]))
        txt = file_in.read().decode("latin1")
        file_in.close()
        parsefunc = year_settings[1]
        logbook_parseable = True
        print(" - Parsing logbook: " + year_settings[0] + "\n - Using parser: " + year_settings[1])
    else:
        try:
            file_in = open(os.path.join(expowebbase, expedition.year, settings.DEFAULT_LOGBOOK_FILE))
            txt = file_in.read().decode("latin1")
            file_in.close()
            logbook_parseable = True
            print("No set parser found, using default")
            parsefunc = settings.DEFAULT_LOGBOOK_PARSER
        except IOError:
            logbook_parseable = False
            print("Couldn't open default logbook file and nothing in settings for expo " + expedition.year)

    if logbook_parseable:
        parser = globals()[parsefunc]
        parser(expedition.year, expedition, txt)
        SetDatesFromLogbookEntries(expedition)
    #return "TOLOAD: " + year + " " + str(expedition.personexpedition_set.all()[1].logbookentry_set.count()) + " " + str(models.PersonTrip.objects.filter(personexpedition__expedition=expedition).count())
def LoadLogbooks():
    """ This is the master function for parsing all logbooks into the Troggle database. """

    #Deletion has been moved to a separate function to enable the non-destructive importing
    #models.LogbookEntry.objects.all().delete()

    # Clear the logbook data issues as we are reloading
    models.DataIssue.objects.filter(parser='logbooks').delete()
    # Fetch all expos
    expos = models.Expedition.objects.all()
    for expo in expos:
        print("\nLoading Logbook for: " + expo.year)

        # Load logbook for expo
        LoadLogbookForExpedition(expo)


dateRegex = re.compile(r'<span\s+class="date">(\d\d\d\d)-(\d\d)-(\d\d)</span>', re.S)
expeditionYearRegex = re.compile(r'<span\s+class="expeditionyear">(.*?)</span>', re.S)
titleRegex = re.compile(r'<H1>(.*?)</H1>', re.S)
reportRegex = re.compile(r'<div\s+class="report">(.*)</div>\s*</body>', re.S)
personRegex = re.compile(r'<div\s+class="person">(.*?)</div>', re.S)
nameAuthorRegex = re.compile(r'<span\s+class="name(,author|)">(.*?)</span>', re.S)
TURegex = re.compile(r'<span\s+class="TU">([0-9]*\.?[0-9]+)</span>', re.S)
locationRegex = re.compile(r'<span\s+class="location">(.*?)</span>', re.S)
caveRegex = re.compile(r'<span\s+class="cave">(.*?)</span>', re.S)
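
To illustrate what these span regexes capture, here is a throwaway example applied to an invented auto-generated entry (the HTML snippet is not from a real logbook):

import re

dateRegex = re.compile(r'<span\s+class="date">(\d\d\d\d)-(\d\d)-(\d\d)</span>', re.S)
TURegex = re.compile(r'<span\s+class="TU">([0-9]*\.?[0-9]+)</span>', re.S)
locationRegex = re.compile(r'<span\s+class="location">(.*?)</span>', re.S)

contents = ('<span class="date">1999-07-21</span>'
            '<span class="location">Top Camp</span>'
            '<span class="TU">7.5</span>')

print(dateRegex.search(contents).groups())      # ('1999', '07', '21')
print(locationRegex.search(contents).group(1))  # Top Camp
print(TURegex.search(contents).group(1))        # 7.5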
def parseAutoLogBookEntry(filename):
    errors = []
@@ -363,17 +377,17 @@ def parseAutoLogBookEntry(filename):
            expedition = models.Expedition.objects.get(year = expeditionYearMatch.groups()[0])
            personExpeditionNameLookup = GetPersonExpeditionNameLookup(expedition)
        except models.Expedition.DoesNotExist:
            errors.append("Expedition not in database")
    else:
        errors.append("Expedition Year could not be parsed")

    titleMatch = titleRegex.search(contents)
    if titleMatch:
        title, = titleMatch.groups()
        if len(title) > settings.MAX_LOGBOOK_ENTRY_TITLE_LENGTH:
            errors.append("Title too long")
    else:
        errors.append("Title could not be found")

    caveMatch = caveRegex.search(contents)
    if caveMatch:
@@ -382,24 +396,24 @@ def parseAutoLogBookEntry(filename):
            cave = models.getCaveByReference(caveRef)
        except AssertionError:
            cave = None
            errors.append("Cave not found in database")
    else:
        cave = None

    locationMatch = locationRegex.search(contents)
    if locationMatch:
        location, = locationMatch.groups()
    else:
        location = None

    if cave is None and location is None:
        errors.append("Neither location nor cave could be found")

    reportMatch = reportRegex.search(contents)
    if reportMatch:
        report, = reportMatch.groups()
    else:
        errors.append("Contents could not be found")

    if errors:
        return errors # Easiest to bail out at this point as we need to make sure that we know which expedition to look for people from.
    people = []
@@ -414,7 +428,7 @@ def parseAutoLogBookEntry(filename):
            author = bool(author)
        else:
            errors.append("Person's name could not be found")

        TUMatch = TURegex.search(contents)
        if TUMatch:
            TU, = TUMatch.groups()
@@ -424,15 +438,15 @@ def parseAutoLogBookEntry(filename):
        people.append((name, author, TU))

    if errors:
        return errors # Bail out before committing to the database

    logbookEntry = models.LogbookEntry(date = date,
                                       expedition = expedition,
                                       title = title, cave = cave, place = location,
                                       text = report, slug = slugify(title)[:50],
                                       filename = filename)
    logbookEntry.save()

    for name, author, TU in people:
        models.PersonTrip(personexpedition = personExpo,
                          time_underground = TU,
                          logbook_entry = logbookEntry,
                          is_logbook_entry_author = author).save()
    print(logbookEntry)


@@ -4,28 +4,30 @@ from django.conf import settings
import troggle.core.models as models
import csv, re, datetime, os, shutil
from utils import save_carefully
from HTMLParser import HTMLParser
from unidecode import unidecode

def saveMugShot(mugShotPath, mugShotFilename, person):
    if mugShotFilename.startswith(r'i/'): #if filename in cell has the directory attached (I think they all do), remove it
        mugShotFilename=mugShotFilename[2:]
    else:
        mugShotFilename=mugShotFilename # just in case one doesn't

    dummyObj=models.DPhoto(file=mugShotFilename)

    #Put a copy of the file in the right place. mugShotObj.file.path is determined by the django filesystemstorage specified in models.py
    if not os.path.exists(dummyObj.file.path):
        shutil.copy(mugShotPath, dummyObj.file.path)

    mugShotObj, created = save_carefully(
        models.DPhoto,
        lookupAttribs={'is_mugshot':True, 'file':mugShotFilename},
        nonLookupAttribs={'caption':"Mugshot for "+person.first_name+" "+person.last_name}
    )

    if created:
        mugShotObj.contains_person.add(person)
        mugShotObj.save()

def parseMugShotAndBlurb(personline, header, person):
    """create mugshot Photo instance"""
@@ -43,38 +45,53 @@ def parseMugShotAndBlurb(personline, header, person):
    person.save()

def LoadPersonsExpos():
    persontab = open(os.path.join(settings.EXPOWEB, "folk", "folk.csv"))
    personreader = csv.reader(persontab)
    headers = personreader.next()
    header = dict(zip(headers, range(len(headers))))

    # make expeditions
    print("Loading expeditions")
    years = headers[5:]
    for year in years:
        lookupAttribs = {'year':year}
        nonLookupAttribs = {'name':"CUCC expo %s" % year}
        save_carefully(models.Expedition, lookupAttribs, nonLookupAttribs)

    # make persons
    print("Loading personexpeditions")
    for personline in personreader:
        name = personline[header["Name"]]
        name = re.sub(r"<.*?>", "", name)

        firstname = ""
        nickname = ""

        rawlastname = personline[header["Lastname"]].strip()
        matchlastname = re.match(r"^([\w&;\s]+)(?:\(([^)]*)\))?", rawlastname)
        lastname = matchlastname.group(1).strip()

        splitnick = re.match(r"^([\w&;\s]+)(?:\(([^)]*)\))?", name)
        fullname = splitnick.group(1)
        nickname = splitnick.group(2) or ""

        fullname = fullname.strip()
        names = fullname.split(' ')
        firstname = names[0]
        if len(names) == 1:
            lastname = ""

        lookupAttribs={'first_name':firstname, 'last_name':(lastname or "")}
        nonLookupAttribs={'is_vfho':bool(personline[header["VfHO member"]]), 'fullname':fullname}

        person, created = save_carefully(models.Person, lookupAttribs, nonLookupAttribs)

        parseMugShotAndBlurb(personline=personline, header=header, person=person)

        # make person expedition from table
        for year, attended in zip(headers, personline)[5:]:
            expedition = models.Expedition.objects.get(year=year)
@@ -83,7 +100,6 @@ def LoadPersonsExpos():
            nonLookupAttribs = {'nickname':nickname, 'is_guest':(personline[header["Guest"]] == "1")}
            save_carefully(models.PersonExpedition, lookupAttribs, nonLookupAttribs)

# used in other referencing parser functions
# expedition name lookup cached for speed (it's a very big list)
Gpersonexpeditionnamelookup = { }
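
A small sketch of the name-splitting convention LoadPersonsExpos applies to folk.csv rows of the form "Forename Lastname (Nickname)"; the sample names are invented and the last-name handling is simplified (the real code reads a separate Lastname column):

import re

for name in ["Phil Sargent (Fish)", "Wookey", "Anthea &amp; Someone"]:
    splitnick = re.match(r"^([\w&;\s]+)(?:\(([^)]*)\))?", name)
    fullname = splitnick.group(1).strip()
    nickname = splitnick.group(2) or ""
    names = fullname.split(' ')
    firstname = names[0]
    lastname = names[-1] if len(names) > 1 else ""
    print("%s | first=%s last=%s nick=%s" % (name, firstname, lastname, nickname))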
@@ -92,34 +108,47 @@ def GetPersonExpeditionNameLookup(expedition):
    res = Gpersonexpeditionnamelookup.get(expedition.name)
    if res:
        return res

    res = { }
    duplicates = set()
    print("Calculating GetPersonExpeditionNameLookup for " + expedition.year)
    personexpeditions = models.PersonExpedition.objects.filter(expedition=expedition)
    htmlparser = HTMLParser()
    for personexpedition in personexpeditions:
        possnames = [ ]
        f = unidecode(htmlparser.unescape(personexpedition.person.first_name.lower()))
        l = unidecode(htmlparser.unescape(personexpedition.person.last_name.lower()))
        full = unidecode(htmlparser.unescape(personexpedition.person.fullname.lower()))
        if l:
            possnames.append(f + " " + l)
            possnames.append(f + " " + l[0])
            possnames.append(f + l[0])
            possnames.append(f[0] + " " + l)
        possnames.append(f)
        if full not in possnames:
            possnames.append(full)
        if personexpedition.nickname not in possnames:
            possnames.append(personexpedition.nickname.lower())
            if l:
                # This allows the nickname to be used as a short name, eg "Phil",
                # adding "Phil Sargent" to the list
                if str(personexpedition.nickname.lower() + " " + l) not in possnames:
                    possnames.append(personexpedition.nickname.lower() + " " + l)
                if str(personexpedition.nickname.lower() + " " + l[0]) not in possnames:
                    possnames.append(personexpedition.nickname.lower() + " " + l[0])
                if str(personexpedition.nickname.lower() + l[0]) not in possnames:
                    possnames.append(personexpedition.nickname.lower() + l[0])

        for possname in possnames:
            if possname in res:
                duplicates.add(possname)
            else:
                res[possname] = personexpedition

    for possname in duplicates:
        del res[possname]

    Gpersonexpeditionnamelookup[expedition.name] = res
    return res
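
GetPersonExpeditionNameLookup builds every plausible written form of a person's name and then discards any form shared by two people, so an ambiguous short name can never resolve. A plain-Python sketch of that idea, with invented names and no Django models:

def name_variants(first, last, nickname=""):
    f, l, n = first.lower(), last.lower(), nickname.lower()
    variants = {f, "%s %s" % (f, l), "%s %s" % (f, l[:1]), f + l[:1], "%s %s" % (f[:1], l)}
    if n:
        variants.update({n, "%s %s" % (n, l), "%s %s" % (n, l[:1])})
    return variants

lookup, duplicates = {}, set()
for person in [("Phil", "Sargent", "Fish"), ("Phil", "Underwood", "")]:
    for v in name_variants(*person):
        if v in lookup:
            duplicates.add(v)
        else:
            lookup[v] = person
for v in duplicates:        # "phil" matches two people, so it is dropped
    del lookup[v]
print(sorted(lookup))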


@@ -1,27 +0,0 @@
from django.conf import settings
import troggle.core.models as models
def load():
folkfile = open(settings.EXPOWEB+"noinfo/folk.csv")
personlines = folkfile.read().splitlines()
persontable = [x.split(',') for x in personlines]
years = [persontable[0][i] for i in range(5,len(persontable[0]))]
for year in years:
newexpedition = models.ExpeditionM( date = year )
newexpedition.save()
for row in persontable[1:]: #skip header
attendedid = [i for i, x in enumerate(row) if '1' in x]
attendedyears = [persontable[0][i] for i in attendedid if i >= 5]
name = row[0]
print(name+' has attended: '+', '.join(attendedyears))
newperson = models.PersonM(
name = name)
newperson.save()
for year in attendedyears:
target = models.ExpeditionM.objects.get(date=year)
newperson.expos_attended.add( target )
print('Person -> Expo table created!')


@@ -1,5 +1,7 @@
'''
This module is the part of troggle that parses descriptions of cave parts (subcaves) from the legacy html
files and saves them in the troggle database as instances of the model Subcave.
Unfortunately, this parser can not be very flexible because the legacy format is poorly structured.
'''

import sys, os
@@ -29,12 +31,12 @@ def importSubcaves(cave):
                                          link[0])
            subcaveFile=open(subcaveFilePath,'r')
            description=subcaveFile.read().decode('iso-8859-1').encode('utf-8')

            lookupAttribs={'title':link[1], 'cave':cave}
            nonLookupAttribs={'description':description}
            newSubcave=save_carefully(Subcave,lookupAttribs=lookupAttribs,nonLookupAttribs=nonLookupAttribs)

            logging.info("Added " + unicode(newSubcave) + " to " + unicode(cave))
        except IOError:
            logging.info("Subcave import couldn't open "+subcaveFilePath)


@@ -5,20 +5,26 @@ import troggle.settings as settings
from subprocess import call, Popen, PIPE
from troggle.parsers.people import GetPersonExpeditionNameLookup
from django.utils.timezone import get_current_timezone
from django.utils.timezone import make_aware

import re
import os
from datetime import datetime

line_leg_regex = re.compile(r"[\d\-+.]+$")

def LoadSurvexLineLeg(survexblock, stardata, sline, comment, cave):
    # The try/excepts here need replacing as they are relatively expensive
    ls = sline.lower().split()
    ssfrom = survexblock.MakeSurvexStation(ls[stardata["from"]])
    ssto = survexblock.MakeSurvexStation(ls[stardata["to"]])

    survexleg = models.SurvexLeg(block=survexblock, stationfrom=ssfrom, stationto=ssto)
    if stardata["type"] == "normal":
        try:
            survexleg.tape = float(ls[stardata["tape"]])
        except ValueError:
            print("Tape misread in", survexblock.survexfile.path)
            print("Stardata:", stardata)
            print("Line:", ls)
@@ -53,14 +59,17 @@ def LoadSurvexLineLeg(survexblock, stardata, sline, comment):
            survexleg.compass = 1000
            survexleg.clino = -90.0
        else:
            assert line_leg_regex.match(lcompass), ls
            assert line_leg_regex.match(lclino) and lclino != "-", ls
            survexleg.compass = float(lcompass)
            survexleg.clino = float(lclino)

    if cave:
        survexleg.cave = cave

    # only save proper legs
    survexleg.save()

    itape = stardata.get("tape")
    if itape:
        try:
@@ -80,96 +89,212 @@ def LoadSurvexEquate(survexblock, sline):
def LoadSurvexLinePassage(survexblock, stardata, sline, comment):
    pass

stardatadefault = {"type":"normal", "t":"leg", "from":0, "to":1, "tape":2, "compass":3, "clino":4}
stardataparamconvert = {"length":"tape", "bearing":"compass", "gradient":"clino"}

regex_comment = re.compile(r"([^;]*?)\s*(?:;\s*(.*))?\n?$")
regex_ref = re.compile(r'.*?ref.*?(\d+)\s*#\s*(\d+)')
regex_star = re.compile(r'\s*\*[\s,]*(\w+)\s*(.*?)\s*(?:;.*)?$')
regex_team = re.compile(r"(Insts|Notes|Tape|Dog|Useless|Pics|Helper|Disto|Consultant)\s+(.*)$(?i)")
regex_team_member = re.compile(r" and | / |, | & | \+ |^both$|^none$(?i)")
regex_qm = re.compile(r'^\s*QM(\d)\s+?([a-dA-DxX])\s+([\w\-]+)\.(\d+)\s+(([\w\-]+)\.(\d+)|\-)\s+(.+)$')
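
regex_qm recognises the provisional ";QM…" comment convention documented inside RecursiveLoad below. A quick illustration against the two documented example lines (data made up):

import re
regex_qm = re.compile(r'^\s*QM(\d)\s+?([a-dA-DxX])\s+([\w\-]+)\.(\d+)\s+(([\w\-]+)\.(\d+)|\-)\s+(.+)$')

for comment in ["QM1 a hobnob_hallway_2.42 hobnob-hallway_3.42 junction of keyhole passage",
                "QM1 a hobnob_hallway_2.42 - junction of keyhole passage"]:
    m = regex_qm.match(comment)
    print("QM%s grade %s from %s.%s resolved-at %s: %s" %
          (m.group(1), m.group(2), m.group(3), m.group(4), m.group(5), m.group(8)))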
def RecursiveLoad(survexblock, survexfile, fin, textlines):
    iblankbegins = 0
    text = [ ]
    stardata = stardatadefault
    teammembers = [ ]

    # uncomment to print out all files during parsing
    print(" - Reading file: " + survexblock.survexfile.path)
    stamp = datetime.now()
    lineno = 0

    # Try to find the cave in the DB if not use the string as before
    path_match = re.search(r"caves-(\d\d\d\d)/(\d+|\d\d\d\d-?\w+-\d+)/", survexblock.survexfile.path)
    if path_match:
        pos_cave = '%s-%s' % (path_match.group(1), path_match.group(2))
        # print('Match')
        # print(pos_cave)
        cave = models.getCaveByReference(pos_cave)
        if cave:
            survexfile.cave = cave

    svxlines = ''
    svxlines = fin.read().splitlines()
    # print('Cave - preloop ' + str(survexfile.cave))
    # print(survexblock)
    for svxline in svxlines:
        # print(survexblock)
        # print(svxline)
        # if not svxline:
        #     print(' - Not survex')
        #     return
        # textlines.append(svxline)

        lineno += 1
        # print(' - Line: %d' % lineno)

        # break the line at the comment
        sline, comment = regex_comment.match(svxline.strip()).groups()
        # detect ref line pointing to the scans directory
        mref = comment and regex_ref.match(comment)
        if mref:
            refscan = "%s#%s" % (mref.group(1), mref.group(2))
            survexscansfolders = models.SurvexScansFolder.objects.filter(walletname=refscan)
            if survexscansfolders:
                survexblock.survexscansfolder = survexscansfolders[0]
                #survexblock.refscandir = "%s/%s%%23%s" % (mref.group(1), mref.group(1), mref.group(2))
                survexblock.save()
            continue
        # This whole section should be moved if we can have *QM become a proper survex command
        # Spec of QM in SVX files, currently commented out, needs adding to survex
        # needs to match regex_qm
        # ;Serial number grade(A/B/C/D/X) nearest-station resolution-station description
        # ;QM1 a hobnob_hallway_2.42 hobnob-hallway_3.42 junction of keyhole passage
        # ;QM1 a hobnob_hallway_2.42 - junction of keyhole passage
        qmline = comment and regex_qm.match(comment)
        if qmline:
            print(qmline.groups())
            #(u'1', u'B', u'miraclemaze', u'1.17', u'-', None, u'\tcontinuation of rift')
            qm_no = qmline.group(1)
            qm_grade = qmline.group(2)
            qm_from_section = qmline.group(3)
            qm_from_station = qmline.group(4)
            qm_resolve_section = qmline.group(6)
            qm_resolve_station = qmline.group(7)
            qm_notes = qmline.group(8)

            print('Cave - %s' % survexfile.cave)
            print('QM no %d' % int(qm_no))
            print('QM grade %s' % qm_grade)
            print('QM section %s' % qm_from_section)
            print('QM station %s' % qm_from_station)
            print('QM res section %s' % qm_resolve_section)
            print('QM res station %s' % qm_resolve_station)
            print('QM notes %s' % qm_notes)

            # If the QM has not been resolved (no resolving station recorded) then load it
            if not qm_resolve_section or qm_resolve_section != '-' or qm_resolve_section != 'None':
                from_section = models.SurvexBlock.objects.filter(name=qm_from_section)
                # If we can find a section (survex note chunk, named)
                if len(from_section) > 0:
                    print(from_section[0])
                    from_station = models.SurvexStation.objects.filter(block=from_section[0], name=qm_from_station)
                    # If we can find a from station then we have the nearest station and can import it
                    if len(from_station) > 0:
                        print(from_station[0])
                        qm = models.QM.objects.create(number=qm_no,
                                                      nearest_station=from_station[0],
                                                      grade=qm_grade.upper(),
                                                      location_description=qm_notes)
            else:
                print('QM found but resolved')

        #print('Cave -sline ' + str(cave))
        if not sline:
            continue
        # detect the star command
        mstar = regex_star.match(sline)
        if not mstar:
            if "from" in stardata:
                # print('Cave ' + str(survexfile.cave))
                # print(survexblock)
                LoadSurvexLineLeg(survexblock, stardata, sline, comment, survexfile.cave)
                # print(' - From: ')
                #print(stardata)
                pass
            elif stardata["type"] == "passage":
                LoadSurvexLinePassage(survexblock, stardata, sline, comment)
                # print(' - Passage: ')
            #Missing "station" in stardata.
            continue

        # detect the star command
        cmd, line = mstar.groups()
        cmd = cmd.lower()
        if re.match("include$(?i)", cmd):
            includepath = os.path.join(os.path.split(survexfile.path)[0], re.sub(r"\.svx$", "", line))
            print(' - Include file found including - ' + includepath)
            # Try to find the cave in the DB if not use the string as before
            path_match = re.search(r"caves-(\d\d\d\d)/(\d+|\d\d\d\d-?\w+-\d+)/", includepath)
            if path_match:
                pos_cave = '%s-%s' % (path_match.group(1), path_match.group(2))
                # print(pos_cave)
                cave = models.getCaveByReference(pos_cave)
                if cave:
                    survexfile.cave = cave
            else:
                print('No match for %s' % includepath)
            includesurvexfile = models.SurvexFile(path=includepath)
            includesurvexfile.save()
            includesurvexfile.SetDirectory()
            if includesurvexfile.exists():
                survexblock.save()
                fininclude = includesurvexfile.OpenFile()
                RecursiveLoad(survexblock, includesurvexfile, fininclude, textlines)
        elif re.match("begin$(?i)", cmd):
            if line:
                newsvxpath = os.path.join(os.path.split(survexfile.path)[0], re.sub(r"\.svx$", "", line))
                # Try to find the cave in the DB if not use the string as before
                path_match = re.search(r"caves-(\d\d\d\d)/(\d+|\d\d\d\d-?\w+-\d+)/", newsvxpath)
                if path_match:
                    pos_cave = '%s-%s' % (path_match.group(1), path_match.group(2))
                    print(pos_cave)
                    cave = models.getCaveByReference(pos_cave)
                    if cave:
                        survexfile.cave = cave
                else:
                    print('No match for %s' % newsvxpath)

                name = line.lower()
                print(' - Begin found for: ' + name)
                # print('Block cave: ' + str(survexfile.cave))
                survexblockdown = models.SurvexBlock(name=name, begin_char=fin.tell(), parent=survexblock, survexpath=survexblock.survexpath+"."+name, cave=survexfile.cave, survexfile=survexfile, totalleglength=0.0)
                survexblockdown.save()
                survexblock.save()
                survexblock = survexblockdown
                # print(survexblockdown)
                textlinesdown = [ ]
                RecursiveLoad(survexblockdown, survexfile, fin, textlinesdown)
            else:
                iblankbegins += 1
        elif re.match("end$(?i)", cmd):
            if iblankbegins:
                iblankbegins -= 1
            else:
                survexblock.text = "".join(textlines)
                survexblock.save()
                # print(' - End found: ')
                endstamp = datetime.now()
                timetaken = endstamp - stamp
                # print(' - Time to process: ' + str(timetaken))
                return
        elif re.match("date$(?i)", cmd):
            if len(line) == 10:
                #print(' - Date found: ' + line)
                survexblock.date = make_aware(datetime.strptime(re.sub(r"\.", "-", line), '%Y-%m-%d'), get_current_timezone())
                expeditions = models.Expedition.objects.filter(year=line[:4])
                if expeditions:
                    assert len(expeditions) == 1
                    survexblock.expedition = expeditions[0]
                    survexblock.expeditionday = survexblock.expedition.get_expedition_day(survexblock.date)
                    survexblock.save()
        elif re.match("team$(?i)", cmd):
            pass
            # print(' - Team found: ')
            mteammember = regex_team.match(line)
            if mteammember:
                for tm in regex_team_member.split(mteammember.group(2)):
                    if tm:
                        personexpedition = survexblock.expedition and GetPersonExpeditionNameLookup(survexblock.expedition).get(tm.lower())
                        if (personexpedition, tm) not in teammembers:
@@ -179,18 +304,23 @@ def RecursiveLoad(survexblock, survexfile, fin, textlines):
                            if personexpedition:
                                personrole.person=personexpedition.person
                            personrole.save()
        elif cmd == "title":
            #print(' - Title found: ')
            survextitle = models.SurvexTitle(survexblock=survexblock, title=line.strip('"'), cave=survexfile.cave)
            survextitle.save()
            pass
        elif cmd == "require":
            # should we check survex version available for processing?
            pass
        elif cmd == "data":
            #print(' - Data found: ')
            ls = line.lower().split()
            stardata = { "type":ls[0] }
            #print(' - Star data: ', stardata)
            #print(ls)
            for i in range(0, len(ls)):
                stardata[stardataparamconvert.get(ls[i], ls[i])] = i - 1
            if ls[0] in ["normal", "cartesian", "nosurvey"]:
@@ -199,40 +329,23 @@ def RecursiveLoad(survexblock, survexfile, fin, textlines):
                stardata = stardatadefault
            else:
                assert ls[0] == "passage", line
        elif cmd == "equate":
            #print(' - Equate found: ')
            LoadSurvexEquate(survexblock, line)
        elif cmd == "fix":
            #print(' - Fix found: ')
            survexblock.MakeSurvexStation(line.split()[0])
        else:
            #print(' - Stuff')
            if cmd not in ["sd", "include", "units", "entrance", "data", "flags", "title", "export", "instrument",
                           "calibrate", "set", "infer", "alias", "ref", "cs", "declination", "case"]:
                print("Unrecognised command in line:", cmd, line, survexblock, survexblock.survexfile.path)

    endstamp = datetime.now()
    timetaken = endstamp - stamp
    # print(' - Time to process: ' + str(timetaken))

def ReloadSurvexCave(survex_cave, area):
    print(survex_cave, area)
    cave = models.Cave.objects.get(kataster_number=survex_cave, area__short_name=area)
    print(cave)
    #cave = models.Cave.objects.get(kataster_number=survex_cave)
    cave.survexblock_set.all().delete()
    cave.survexfile_set.all().delete()
    cave.survexdirectory_set.all().delete()
    survexfile = models.SurvexFile(path="caves-" + cave.kat_area() + "/" + survex_cave + "/" + survex_cave, cave=cave)
    survexfile.save()
    survexfile.SetDirectory()
    survexblockroot = models.SurvexBlock(name="root", survexpath="caves-" + cave.kat_area(), begin_char=0, cave=cave, survexfile=survexfile, totalleglength=0.0)
    survexblockroot.save()
    fin = survexfile.OpenFile()
    textlines = [ ]
    RecursiveLoad(survexblockroot, survexfile, fin, textlines)
    survexblockroot.text = "".join(textlines)
    survexblockroot.save()
def LoadAllSurvexBlocks():
@@ -249,7 +362,7 @@ def LoadAllSurvexBlocks():
print(" - Data flushed") print(" - Data flushed")
survexfile = models.SurvexFile(path="all", cave=None) survexfile = models.SurvexFile(path=settings.SURVEX_TOPNAME, cave=None)
survexfile.save() survexfile.save()
survexfile.SetDirectory() survexfile.SetDirectory()
@@ -258,22 +371,13 @@ def LoadAllSurvexBlocks():
    survexblockroot.save()
    fin = survexfile.OpenFile()
    textlines = [ ]

    # The real work starts here
    RecursiveLoad(survexblockroot, survexfile, fin, textlines)
    fin.close()
    survexblockroot.text = "".join(textlines)
    survexblockroot.save()
    #Load each cave,
    #FIXME this should be dealt with in load all above
    print(" - Reloading all caves")
    caves = models.Cave.objects.all()
    for cave in caves:
        if cave.kataster_number and os.path.isdir(os.path.join(settings.SURVEX_DATA, "caves-" + cave.kat_area(), cave.kataster_number)):
            if cave.kataster_number not in ['40']:
                print("loading", cave, cave.kat_area())
                ReloadSurvexCave(cave.kataster_number, cave.kat_area())
poslineregex = re.compile(r"^\(\s*([+-]?\d*\.\d*),\s*([+-]?\d*\.\d*),\s*([+-]?\d*\.\d*)\s*\)\s*([^\s]+)$")
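
poslineregex matches one line of the survex .pos output, e.g. "(   12.34,   56.78,  910.11 ) 1623.204.entrance". A standalone illustration with a made-up line:

import re
poslineregex = re.compile(r"^\(\s*([+-]?\d*\.\d*),\s*([+-]?\d*\.\d*),\s*([+-]?\d*\.\d*)\s*\)\s*([^\s]+)$")

line = "(   12.34,   56.78,  910.11 ) 1623.204.entrance"
r = poslineregex.match(line)
if r:
    x, y, z, name = r.groups()
    print("%s -> %.2f %.2f %.2f" % (name, float(x), float(y), float(z)))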
@@ -281,12 +385,12 @@ def LoadPos():
    print('Loading Pos....')

    call([settings.CAVERN, "--output=%s%s.3d" % (settings.SURVEX_DATA, settings.SURVEX_TOPNAME), "%s%s.svx" % (settings.SURVEX_DATA, settings.SURVEX_TOPNAME)])
    call([settings.THREEDTOPOS, '%s%s.3d' % (settings.SURVEX_DATA, settings.SURVEX_TOPNAME)], cwd = settings.SURVEX_DATA)

    posfile = open("%s%s.pos" % (settings.SURVEX_DATA, settings.SURVEX_TOPNAME))
    posfile.readline() #Drop header
    for line in posfile.readlines():
        r = poslineregex.match(line)
        if r:
            x, y, z, name = r.groups()
            try:


@@ -1,12 +1,7 @@
import sys, os, types, logging, stat
import settings
from troggle.core.models import *
from PIL import Image
import csv
import re
import datetime
@@ -29,7 +24,7 @@ def readSurveysFromCSV():
    try: # could probably combine these two
        surveytab = open(os.path.join(settings.SURVEY_SCANS, "Surveys.csv"))
    except IOError:
        import cStringIO, urllib
        surveytab = cStringIO.StringIO(urllib.urlopen(settings.SURVEY_SCANS + "/Surveys.csv").read())
    dialect=csv.Sniffer().sniff(surveytab.read())
    surveytab.seek(0,0)
@@ -42,24 +37,21 @@ def readSurveysFromCSV():
print("There are no expeditions in the database. Please run the logbook parser.") print("There are no expeditions in the database. Please run the logbook parser.")
sys.exit() sys.exit()
logging.info("Deleting all scanned images") logging.info("Deleting all scanned images")
ScannedImage.objects.all().delete() ScannedImage.objects.all().delete()
logging.info("Deleting all survey objects") logging.info("Deleting all survey objects")
Survey.objects.all().delete() Survey.objects.all().delete()
logging.info("Beginning to import surveys from "+str(os.path.join(settings.SURVEYS, "Surveys.csv"))+"\n"+"-"*60+"\n") logging.info("Beginning to import surveys from "+str(os.path.join(settings.SURVEYS, "Surveys.csv"))+"\n"+"-"*60+"\n")
for survey in surveyreader: for survey in surveyreader:
#I hate this, but some surveys have a letter eg 2000#34a. The next line deals with that. # I hate this, but some surveys have a letter eg 2000#34a. The next line deals with that.
walletNumberLetter = re.match(r'(?P<number>\d*)(?P<letter>[a-zA-Z]*)',survey[header['Survey Number']]) walletNumberLetter = re.match(r'(?P<number>\d*)(?P<letter>[a-zA-Z]*)',survey[header['Survey Number']])
# print(walletNumberLetter.groups()) # print(walletNumberLetter.groups())
year=survey[header['Year']] year=survey[header['Year']]
surveyobj = Survey( surveyobj = Survey(
expedition = Expedition.objects.filter(year=year)[0], expedition = Expedition.objects.filter(year=year)[0],
wallet_number = walletNumberLetter.group('number'), wallet_number = walletNumberLetter.group('number'),
@@ -73,7 +65,6 @@ def readSurveysFromCSV():
            pass
        surveyobj.save()

        logging.info("added survey " + survey[header['Year']] + "#" + surveyobj.wallet_number + "\r")

# dead
@@ -99,7 +90,7 @@ def parseSurveyScans(expedition, logfile=None):
            #scanList = listdir(expedition.year, surveyFolder)
            scanList=os.listdir(os.path.join(yearPath,surveyFolder))
        except AttributeError:
            print("Folder: " + surveyFolder + " ignored\r")
            continue

        for scan in scanList:
@@ -107,7 +98,7 @@ def parseSurveyScans(expedition, logfile=None):
                scanChopped=re.match(r'(?i).*(notes|elev|plan|elevation|extend)(\d*)\.(png|jpg|jpeg)',scan).groups()
                scanType,scanNumber,scanFormat=scanChopped
            except AttributeError:
                print("File: " + scan + " ignored\r")
                continue

            if scanType == 'elev' or scanType == 'extend':
                scanType = 'elevation'
@@ -141,14 +132,14 @@ def parseSurveyScans(expedition, logfile=None):
        yearPath=os.path.join(settings.SURVEY_SCANS, "surveyscans", expedition.year)
        print("No folder found for " + expedition.year + " at:- " + yearPath)

# dead
def parseSurveys(logfile=None):
    try:
        readSurveysFromCSV()
    except (IOError, OSError):
        print("Survey CSV not found..")
        pass

    for expedition in Expedition.objects.filter(year__gte=2000): #expos since 2000, because paths and filenames were nonstandard before then
        parseSurveyScans(expedition)
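
The scan-name regex in parseSurveyScans classifies files such as "notes2.png" or "elev.jpg" into a type, an optional number and a format. A standalone illustration (file names invented):

import re

for scan in ["notes2.png", "plan.jpg", "elev1.jpeg", "extend.png", "DSCF1234.jpg"]:
    m = re.match(r'(?i).*(notes|elev|plan|elevation|extend)(\d*)\.(png|jpg|jpeg)', scan)
    if not m:
        print("File: " + scan + " ignored")
        continue
    scanType, scanNumber, scanFormat = m.groups()
    if scanType == 'elev' or scanType == 'extend':
        scanType = 'elevation'
    print("%s -> type=%s number=%s format=%s" % (scan, scanType, scanNumber, scanFormat))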
@@ -174,28 +165,25 @@ def GetListDir(sdir):
        ff = os.path.join(sdir, f)
        res.append((f, ff, os.path.isdir(ff)))
    return res

def LoadListScansFile(survexscansfolder):
    gld = [ ]

    # flatten out any directories in these book files
    for (fyf, ffyf, fisdiryf) in GetListDir(survexscansfolder.fpath):
        if fisdiryf:
            gld.extend(GetListDir(ffyf))
        else:
            gld.append((fyf, ffyf, fisdiryf))

    for (fyf, ffyf, fisdiryf) in gld:
        #assert not fisdiryf, ffyf
        if re.search(r"\.(?:png|jpg|jpeg)(?i)$", fyf):
            survexscansingle = SurvexScanSingle(ffile=ffyf, name=fyf, survexscansfolder=survexscansfolder)
            survexscansingle.save()

# this iterates through the scans directories (either here or on the remote server)
# and builds up the models we can access later
def LoadListScans():
@@ -206,17 +194,17 @@ def LoadListScans():
    SurvexScansFolder.objects.all().delete()

    # first do the smkhs (large kh survey scans) directory
    survexscansfoldersmkhs = SurvexScansFolder(fpath=os.path.join(settings.SURVEY_SCANS, "smkhs"), walletname="smkhs")
    if os.path.isdir(survexscansfoldersmkhs.fpath):
        survexscansfoldersmkhs.save()
        LoadListScansFile(survexscansfoldersmkhs)

    # iterate into the surveyscans directory
    for f, ff, fisdir in GetListDir(os.path.join(settings.SURVEY_SCANS, "surveyscans")):
        if not fisdir:
            continue

        # do the year folders
        if re.match(r"\d\d\d\d$", f):
            for fy, ffy, fisdiry in GetListDir(ff):
@@ -225,13 +213,13 @@ def LoadListScans():
                    survexscansfolder = SurvexScansFolder(fpath=ffy, walletname=fy)
                    survexscansfolder.save()
                    LoadListScansFile(survexscansfolder)
        # do the
        elif f != "thumbs":
            survexscansfolder = SurvexScansFolder(fpath=ff, walletname=f)
            survexscansfolder.save()
            LoadListScansFile(survexscansfolder)

def FindTunnelScan(tunnelfile, path):
    scansfolder, scansfile = None, None
@@ -247,12 +235,12 @@ def FindTunnelScan(tunnelfile, path):
            print(scansfilel, len(scansfilel))
            assert len(scansfilel) == 1
            scansfile = scansfilel[0]

        if scansfolder:
            tunnelfile.survexscansfolders.add(scansfolder)
        if scansfile:
            tunnelfile.survexscans.add(scansfile)

    elif path and not re.search(r"\.(?:png|jpg|jpeg)$(?i)", path):
        name = os.path.split(path)[1]
        print("ttt", tunnelfile.tunnelpath, path, name)
@@ -272,21 +260,22 @@ def SetTunnelfileInfo(tunnelfile):
        fin = open(ff)
        ttext = fin.read()
        fin.close()

        mtype = re.search("<(fontcolours|sketch)", ttext)
        #assert mtype, ff
        if mtype:
            tunnelfile.bfontcolours = (mtype.group(1)=="fontcolours")
        tunnelfile.npaths = len(re.findall("<skpath", ttext))
        tunnelfile.save()

        # <tunnelxml tunnelversion="version2009-06-21 Matienzo" tunnelproject="ireby" tunneluser="goatchurch" tunneldate="2009-06-29 23:22:17">
        # <pcarea area_signal="frame" sfscaledown="12.282584" sfrotatedeg="-90.76982" sfxtrans="11.676667377221136" sfytrans="-15.677173422877454" sfsketch="204description/scans/plan(38).png" sfstyle="" nodeconnzsetrelative="0.0">
        for path, style in re.findall('<pcarea area_signal="frame".*?sfsketch="([^"]*)" sfstyle="([^"]*)"', ttext):
            FindTunnelScan(tunnelfile, path)

        # should also scan and look for survex blocks that might have been included
        # and also survex titles as well.

        tunnelfile.save()
@@ -306,6 +295,6 @@ def LoadTunnelFiles():
elif f[-4:] == ".xml": elif f[-4:] == ".xml":
tunnelfile = TunnelFile(tunnelpath=lf, tunnelname=os.path.split(f[:-4])[1]) tunnelfile = TunnelFile(tunnelpath=lf, tunnelname=os.path.split(f[:-4])[1])
tunnelfile.save() tunnelfile.save()
for tunnelfile in TunnelFile.objects.all(): for tunnelfile in TunnelFile.objects.all():
SetTunnelfileInfo(tunnelfile) SetTunnelfileInfo(tunnelfile)


@@ -1,65 +0,0 @@
from django.conf import settings
import subprocess, re
import troggle.core.models as models
def load():
print('Load survex files and relations')
load_area('1623')
def load_area(areacode):
print('Searching all cave dirs files')
basedir = settings.SURVEX_DATA+'caves-'+areacode+'/'
cavedirs = bash("find "+basedir+" -maxdepth 1 -type d").splitlines() #this command finds all directories
print('Obtained list of directories! (#dirs='+str(len(cavedirs))+')')
for cavedir in cavedirs:
if cavedir==basedir:
continue #skip the basedir - a non-proper subdirectory
parentname = bash('echo '+cavedir+' | rev | cut -f1 -d \'/\' | rev').splitlines()[0] #get final bit of the directory
parentcave = models.CaveM.objects.filter(survex_file__icontains=cavedir)
if len(parentcave)>1:
print('Non unique parent - skipping. Name:'+parentname)
elif len(parentcave)==0:
print('Error! parent not created:'+parentname)
continue
else: #exactly one match
print('Adding relations of:'+parentname)
parentcave = parentcave[0]
surveyfiles = bash('find '+cavedir+' -name \'*.svx\'').splitlines()
for fn in surveyfiles:
print(fn)
svxcontents = open(fn,'r').read().splitlines()
try:
dateline = [x for x in svxcontents if ('*date' in x)][0]
date = re.findall('\\d\\d\\d\\d\\.\\d\\d\\.\\d\\d', dateline, re.S)[0]
except:
if( len( [x for x in svxcontents if ('*date' in x)] ) == 0 ):
continue #skip dateless files
print('Date format error in '+fn)
print('Dateline = '+ '"'.join([x for x in svxcontents if ('*date' in x)]))
date = '1900.01.01'
newsurvex = models.SurveyM(survex_file=fn, date=date)
newsurvex.save()
parentcave.surveys.add(newsurvex)
parentcave.save()
def file_exists(filename):
test = bash('if [ ! -f '+filename+' ] ; then echo MISSING; fi')#test for file existence
if 'MISSING' in test: #send error message to the database
return False
return True
def bash(cmd): #calls command in bash shell, returns output
process = subprocess.Popen(cmd,shell=True,stdout=subprocess.PIPE)
output, error = process.communicate()
return output
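
The deleted helper above shelled out to find and cut; the same scan can be done in pure Python. A hedged sketch under an assumed directory layout — the path in the commented call is illustrative, not taken from the repo:

import os, re

def find_svx_dates(basedir):
    """Yield (path, date) for every .svx file under basedir that carries a *date line."""
    dateline_re = re.compile(r"\*date\s+(\d\d\d\d)\.(\d\d)\.(\d\d)")
    for dirpath, dirnames, filenames in os.walk(basedir):
        for fn in filenames:
            if not fn.endswith(".svx"):
                continue
            path = os.path.join(dirpath, fn)
            with open(path) as f:
                m = dateline_re.search(f.read())
            if m:
                yield path, "%s.%s.%s" % m.groups()

# for path, date in find_svx_dates("/path/to/loser/caves-1623"):
#     print(path, date)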


@@ -27,7 +27,7 @@ from django.conf.urls import *
from profiles import views

urlpatterns = [
    url(r'^select/$',
        views.select_profile,
        name='profiles_select_profile'),
@@ -43,4 +43,4 @@ urlpatterns = patterns('',
    url(r'^$',
        views.profile_list,
        name='profiles_profile_list'),
]


@@ -14,8 +14,7 @@ try:
except ImportError: # django >= 1.7
    SiteProfileNotAvailable = type('SiteProfileNotAvailable', (Exception,), {})

from django.apps import apps

def get_profile_model():
    """
@@ -28,7 +27,7 @@ def get_profile_model():
    if (not hasattr(settings, 'AUTH_PROFILE_MODULE')) or \
       (not settings.AUTH_PROFILE_MODULE):
        raise SiteProfileNotAvailable
    profile_mod = apps.get_model(*settings.AUTH_PROFILE_MODULE.split('.'))
    if profile_mod is None:
        raise SiteProfileNotAvailable
    return profile_mod
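
django.db.models.get_model went away in Django 1.9; apps.get_model is the supported replacement and takes the same "app_label", "ModelName" pair. An illustrative wrapper only — the "core.Person" default is an assumption, not taken from this repo:

from django.apps import apps

def lookup_profile_model(dotted="core.Person"):
    app_label, model_name = dotted.split('.')
    return apps.get_model(app_label, model_name)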


@@ -8,9 +8,8 @@ BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Django settings for troggle project.

DEBUG = True

ALLOWED_HOSTS = [u'expo.survex.com']

ADMINS = (
    # ('Your Name', 'your_email@domain.com'),
@@ -45,37 +44,87 @@ NOTABLECAVESHREFS = [ "161", "204", "258", "76", "107", "264" ]
# Examples: "http://foo.com/media/", "/media/". # Examples: "http://foo.com/media/", "/media/".
ADMIN_MEDIA_PREFIX = '/troggle/media-admin/' ADMIN_MEDIA_PREFIX = '/troggle/media-admin/'
PHOTOS_ROOT = os.path.join(EXPOWEB, 'photos') PHOTOS_ROOT = os.path.join(EXPOWEB, 'photos')
CAVEDESCRIPTIONS = os.path.join(EXPOWEB, "noinfo", "cave_data") CAVEDESCRIPTIONS = os.path.join(EXPOWEB, "cave_data")
ENTRANCEDESCRIPTIONS = os.path.join(EXPOWEB, "noinfo", "entrance_data") ENTRANCEDESCRIPTIONS = os.path.join(EXPOWEB, "entrance_data")
MEDIA_URL = urlparse.urljoin(URL_ROOT , '/site_media/') MEDIA_URL = urlparse.urljoin(URL_ROOT , '/site_media/')
SURVEYS_URL = urlparse.urljoin(URL_ROOT , '/survey_scans/') SURVEYS_URL = urlparse.urljoin(URL_ROOT , '/survey_scans/')
PHOTOS_URL = urlparse.urljoin(URL_ROOT , '/photos/') PHOTOS_URL = urlparse.urljoin(URL_ROOT , '/photos/')
SVX_URL = urlparse.urljoin(URL_ROOT , '/survex/') SVX_URL = urlparse.urljoin(URL_ROOT , '/survex/')
# top-level survex file basename (without .svx)
SURVEX_TOPNAME = "1623"
KAT_AREAS = ['1623', '1624', '1626', '1627']
DEFAULT_LOGBOOK_PARSER = "Parseloghtmltxt"
DEFAULT_LOGBOOK_FILE = "logbook.html"
LOGBOOK_PARSER_SETTINGS = {
"2018": ("2018/logbook.html", "Parseloghtmltxt"),
"2017": ("2017/logbook.html", "Parseloghtmltxt"),
"2016": ("2016/logbook.html", "Parseloghtmltxt"),
"2015": ("2015/logbook.html", "Parseloghtmltxt"),
"2014": ("2014/logbook.html", "Parseloghtmltxt"),
"2013": ("2013/logbook.html", "Parseloghtmltxt"),
"2012": ("2012/logbook.html", "Parseloghtmltxt"),
"2011": ("2011/logbook.html", "Parseloghtmltxt"),
"2010": ("2010/logbook.html", "Parselogwikitxt"),
"2009": ("2009/2009logbook.txt", "Parselogwikitxt"),
"2008": ("2008/2008logbook.txt", "Parselogwikitxt"),
"2007": ("2007/logbook.html", "Parseloghtmltxt"),
"2006": ("2006/logbook/logbook_06.txt", "Parselogwikitxt"),
"2005": ("2005/logbook.html", "Parseloghtmltxt"),
"2004": ("2004/logbook.html", "Parseloghtmltxt"),
"2003": ("2003/logbook.html", "Parseloghtml03"),
"2002": ("2002/logbook.html", "Parseloghtmltxt"),
"2001": ("2001/log.htm", "Parseloghtml01"),
"2000": ("2000/log.htm", "Parseloghtml01"),
"1999": ("1999/log.htm", "Parseloghtml01"),
"1998": ("1998/log.htm", "Parseloghtml01"),
"1997": ("1997/log.htm", "Parseloghtml01"),
"1996": ("1996/log.htm", "Parseloghtml01"),
"1995": ("1995/log.htm", "Parseloghtml01"),
"1994": ("1994/log.htm", "Parseloghtml01"),
"1993": ("1993/log.htm", "Parseloghtml01"),
"1992": ("1992/log.htm", "Parseloghtml01"),
"1991": ("1991/log.htm", "Parseloghtml01"),
}
APPEND_SLASH = False APPEND_SLASH = False
SMART_APPEND_SLASH = True SMART_APPEND_SLASH = True
# Make this unique, and don't share it with anybody. # Make this unique, and don't share it with anybody.
SECRET_KEY = 'a#vaeozn0)uz_9t_%v5n#tj)m+%ace6b_0(^fj!355qki*v)j2' SECRET_KEY = 'a#vaeozn0)uz_9t_%v5n#tj)m+%ace6b_0(^fj!355qki*v)j2'
# List of callables that know how to import templates from various sources. TEMPLATES = [
TEMPLATE_LOADERS = ( {
'django.template.loaders.filesystem.Loader', 'BACKEND': 'django.template.backends.django.DjangoTemplates',
'django.template.loaders.app_directories.Loader', 'DIRS': [
# 'django.template.loaders.eggs.load_template_source', os.path.join(PYTHON_PATH, 'templates')
) ],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.contrib.auth.context_processors.auth',
'django.template.context_processors.debug',
'django.template.context_processors.i18n',
'django.template.context_processors.media',
'django.template.context_processors.static',
'django.template.context_processors.tz',
'django.contrib.messages.context_processors.messages',
'django.template.context_processors.request',
#'core.context.troggle_context'
]
},
},
]
if django.VERSION[0] == 1 and django.VERSION[1] < 4: if django.VERSION[0] == 1 and django.VERSION[1] < 4:
authmodule = 'django.core.context_processors.auth' authmodule = 'django.core.context_processors.auth'
else: else:
authmodule = 'django.contrib.auth.context_processors.auth' authmodule = 'django.contrib.auth.context_processors.auth'
TOPCAMPX=411571.00
TOPCAMPY=5282639.00
TEMPLATE_CONTEXT_PROCESSORS = ( authmodule, "core.context.troggle_context", )
LOGIN_REDIRECT_URL = '/' LOGIN_REDIRECT_URL = '/'
INSTALLED_APPS = ( INSTALLED_APPS = (
@@ -88,14 +137,13 @@ INSTALLED_APPS = (
'django.contrib.messages', 'django.contrib.messages',
'django.contrib.staticfiles', 'django.contrib.staticfiles',
#'troggle.photologue', #'troggle.photologue',
#'troggle.reversion',
#'django_evolution',
'tinymce', 'tinymce',
'registration', 'registration',
'troggle.profiles', 'troggle.profiles',
'troggle.core', 'troggle.core',
'troggle.flatpages', 'troggle.flatpages',
'troggle.imagekit', 'imagekit',
'django_extensions',
) )
MIDDLEWARE_CLASSES = ( MIDDLEWARE_CLASSES = (
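The LOGBOOK_PARSER_SETTINGS table introduced in this settings hunk maps each expedition year to a (logbook file, parser class name) pair, with DEFAULT_LOGBOOK_FILE and DEFAULT_LOGBOOK_PARSER as the fallback. A hypothetical sketch of how such a table could be consumed; troggle's actual parsers/logbooks.py may differ:

from django.conf import settings

def logbook_parser_for(year):
    # Fall back to the defaults when a year has no explicit entry; the
    # default filename is assumed to live under that year's directory.
    default = ("%s/%s" % (year, settings.DEFAULT_LOGBOOK_FILE),
               settings.DEFAULT_LOGBOOK_PARSER)
    return settings.LOGBOOK_PARSER_SETTINGS.get(year, default)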

View File

@@ -2,14 +2,14 @@
<html lang="en"> <html lang="en">
<head> <head>
<meta http-equiv="Content-Type" content="text/html; charset=ISO-8859-1"/> <meta http-equiv="Content-Type" content="text/html; charset=ISO-8859-1"/>
<link rel="stylesheet" type="text/css" href="{{ settings.MEDIA_URL }}css/main3.css" title="eyeCandy"/> <link rel="stylesheet" type="text/css" href="{{ MEDIA_URL }}css/main3.css" title="eyeCandy"/>
<link rel="alternate stylesheet" type="text/css" href="{{ settings.MEDIA_URL }}css/mainplain.css" title="plain"/> <link rel="alternate stylesheet" type="text/css" href="{{ MEDIA_URL }}css/mainplain.css" title="plain"/>
<link rel="stylesheet" type="text/css" href="{{ settings.MEDIA_URL }}css/dropdownNavStyle.css" /> <link rel="stylesheet" type="text/css" href="{{ MEDIA_URL }}css/dropdownNavStyle.css" />
<title>{% block title %}Troggle{% endblock %}</title> <title>{% block title %}Troggle{% endblock %}</title>
<script src="{{ settings.JSLIB_URL }}jquery/jquery.min.js" type="text/javascript"></script> <!-- <script src="{{ settings.JSLIB_URL }}jquery/jquery.min.js" type="text/javascript"></script> -->
<script src="{{ settings.MEDIA_URL }}js/jquery.quicksearch.js" type="text/javascript"></script> <script src="{{ MEDIA_URL }}js/jquery.quicksearch.js" type="text/javascript"></script>
<script src="{{ settings.MEDIA_URL }}js/base.js" type="text/javascript"></script> <script src="{{ MEDIA_URL }}js/base.js" type="text/javascript"></script>
<script src="{{ settings.MEDIA_URL }}js/jquery.dropdownPlain.js" type="text/javascript"></script> <script src="{{ MEDIA_URL }}js/jquery.dropdownPlain.js" type="text/javascript"></script>
{% block head %}{% endblock %} {% block head %}{% endblock %}
</head> </head>
@@ -64,8 +64,8 @@
<div id="related"> <div id="related">
{% block related %} {% block related %}
<script language="javascript"> <script language="javascript">
$('#related').remove() $('#related').remove()
/*This is a hack to stop a line appearing because of the empty div border*/ /*This is a hack to stop a line appearing because of the empty div border*/
</script> </script>
{% endblock %} {% endblock %}
</div> </div>

View File

@@ -17,7 +17,7 @@ div.cv-panel {
} }
div.cv-compass, div.cv-ahi { div.cv-compass, div.cv-ahi {
position: absolute; position: absolute;
bottom: 95px; bottom: 95px;
right: 5px; right: 5px;
margin: 0; margin: 0;
@@ -31,7 +31,7 @@ div.cv-compass, div.cv-ahi {
background-color: black; background-color: black;
color: white; color: white;
} }
div.cv-ahi { div.cv-ahi {
right: 95px; right: 95px;
} }
@@ -152,7 +152,7 @@ div.linear-scale-caption {
position: absolute; position: absolute;
top: 64px; top: 64px;
left: 0px; left: 0px;
height: auto; height: auto;
margin-top:0; margin-top:0;
bottom: 44px; bottom: 44px;
background-color: #222222; background-color: #222222;
@@ -220,7 +220,7 @@ div.linear-scale-caption {
} }
#frame .tab { #frame .tab {
position: absolute; position: absolute;
right: 0px;lass="cavedisplay" right: 0px;
width: 40px; width: 40px;
height: 40px; height: 40px;
box-sizing: border-box; box-sizing: border-box;
@@ -421,7 +421,7 @@ div#scene {
CV.UI.init( 'scene', { CV.UI.init( 'scene', {
home: '/javascript/CaveView/', home: '/javascript/CaveView/',
surveyDirectory: '/cave/3d/', surveyDirectory: '/cave/3d/',
terrainDirectory: '/loser/surface/terrain/' terrainDirectory: '/loser/surface/terrain/'
} ); } );
// load a single survey to display // load a single survey to display
@@ -516,14 +516,17 @@ div#scene {
{% if ent.entrance.exact_station %} {% if ent.entrance.exact_station %}
<dt>Exact Station</dt><dd>{{ ent.entrance.exact_station|safe }} {{ ent.entrance.exact_location.y|safe }}, {{ ent.entrance.exact_location.x|safe }}, {{ ent.entrance.exact_location.z|safe }}m</dd> <dt>Exact Station</dt><dd>{{ ent.entrance.exact_station|safe }} {{ ent.entrance.exact_location.y|safe }}, {{ ent.entrance.exact_location.x|safe }}, {{ ent.entrance.exact_location.z|safe }}m</dd>
{% endif %} {% endif %}
{% if ent.entrance.other_station %} {% if ent.entrance.find_location %}
<dt>Coordinates</dt><dd>{{ ent.entrance.find_location|safe }}</dd>
{% endif %}
{% if ent.entrance.other_station %}
<dt>Other Station</dt><dd>{{ ent.entrance.other_station|safe }} <dt>Other Station</dt><dd>{{ ent.entrance.other_station|safe }}
{% if ent.entrance.other_description %} {% if ent.entrance.other_description %}
- {{ ent.entrance.other_description|safe }} - {{ ent.entrance.other_description|safe }}
{% endif %} {{ ent.entrance.other_location.y|safe }}, {{ ent.entrance.other_location.x|safe }}, {{ ent.entrance.other_location.z|safe }}m {% endif %} {{ ent.entrance.other_location.y|safe }}, {{ ent.entrance.other_location.x|safe }}, {{ ent.entrance.other_location.z|safe }}m
</dd> </dd>
{% endif %} {% endif %}
</dl> </dl>
</li> </li>
{% endfor %} {% endfor %}
</ul> </ul>

View File

@@ -11,7 +11,7 @@
<h3>Notable caves</h3> <h3>Notable caves</h3>
<ul> <ul>
{% for cave in notablecaves %} {% for cave in notablecaves %}
<li> <a href="{{ cave.url }}">{% if cave.kataster_number %}{{ cave.kataster_number }}{% else %}{{cave.unofficial_number }}{%endif %} {{cave.official_name|safe}}</a> </li> <li> <a href="{{ cave.url }}">{% if cave.kataster_number %}{{ cave.kataster_number }}{% else %}{{cave.unofficial_number }}{% endif %} {{cave.official_name|safe}}</a> </li>
{% endfor %} {% endfor %}
</ul> </ul>
@@ -20,7 +20,7 @@
<table class="searchable"> <table class="searchable">
{% for cave in caves1623 %} {% for cave in caves1623 %}
<tr><td> <a href="{{ cave.url }}">{% if cave.kataster_number %}{{ cave.kataster_number }}{% else %}{{cave.unofficial_number }}{%endif %} {{cave.official_name|safe}}</a> {{ cave.slug }}</td></tr> <tr><td> <a href="{{ cave.url }}">{% if cave.kataster_number %}{{ cave.kataster_number }} {{cave.official_name|safe}}</a> {% if cave.unofficial_number %}({{cave.unofficial_number }}){% endif %}{% else %}{{cave.unofficial_number }} {{cave.official_name|safe}}</a> {% endif %}</td></tr>
{% endfor %} {% endfor %}
</table> </table>
@@ -30,7 +30,8 @@
<ul class="searchable"> <ul class="searchable">
{% for cave in caves1626 %} {% for cave in caves1626 %}
<li> <a href="{{ cave.url }}">{% if cave.kataster_number %}{{ cave.kataster_number }}{% else %}{{cave.unofficial_number }}{%endif %} {{cave.official_name|safe}}</a> </li> <li> <a href="{{ cave.url }}">{% if cave.kataster_number %}{{ cave.kataster_number }} {{cave.official_name|safe}}</a> {% if cave.unofficial_number %}({{cave.unofficial_number }}){% endif %}{% else %}{{cave.unofficial_number }} {{cave.official_name|safe}}</a> {% endif %}
</li>
{% endfor %} {% endfor %}
</ul> </ul>

View File

@@ -23,25 +23,45 @@
<form name="reset" method="post" action=""> <form name="reset" method="post" action="">
<h3>Wipe:</h3> <h3>Wipe:</h3>
<table>
<table> <tr>
<tr><td>Wipe entire database and recreate tables: </td><td><input type="checkbox" name="reload_db" /></td><td> <input type="submit" id="Import" value="I really want to delete all information in troggle, and accept all responsibility."></td></tr> <td>Wipe entire database and recreate tables: </td>
</table> <td><input type="checkbox" name="reload_db" /></td>
<td>
<input type="submit" id="Import" value="I really want to delete all information in troggle, and accept all responsibility.">
</td>
</tr>
</table>
</form> </form>
<h3>Import (non-destructive):</h3> <h3>Import (non-destructive):</h3>
<form name="import" method="post" action=""> <form name="import" method="post" action="">
<table> <table>
<tr><td>people from folk.csv using parsers\people.py</td><td><input type="checkbox" name="import_people"/></td></tr> <tr>
<tr><td>caves from cavetab2.csv using parsers\cavetab.py</td><td> <input type="checkbox" class="parser" name="import_cavetab"/></td></tr> <td>people from folk.csv using parsers\people.py</td>
<tr><td>logbook entries using parsers\logbooks.py</td><td><input type="checkbox" name="import_logbooks"/></td></tr> <td><input type="checkbox" name="import_people"/></td>
<tr><td>QMs using parsers\QMs.py</td><td><input type="checkbox" name="import_QMs" /></td></tr> </tr>
<tr><td>survey scans using parsers\surveys.py</td><td><input type="checkbox" name="import_surveys" /></td></tr> <tr>
<tr><td>survex data using parsers\survex.py</td><td><input type="checkbox" name="import_survex" /></td></tr> <td>caves from cavetab2.csv using parsers\cavetab.py</td>
<td> <input type="checkbox" class="parser" name="import_cavetab"/></td>
</table> </tr>
<tr>
<td>logbook entries using parsers\logbooks.py</td>
<td><input type="checkbox" name="import_logbooks"/></td>
</tr>
<tr>
<td>QMs using parsers\QMs.py</td>
<td><input type="checkbox" name="import_QMs" /></td>
</tr>
<tr>
<td>survey scans using parsers\surveys.py</td>
<td><input type="checkbox" name="import_surveys" /></td>
</tr>
<tr>
<td>survex data using parsers\survex.py</td>
<td><input type="checkbox" name="import_survex" /></td>
</tr>
</table>
<p> <p>
<input type="submit" id="Import" value="Import"> <input type="submit" id="Import" value="Import">

View File

@@ -9,6 +9,7 @@
<script src="{{ settings.TINY_MCE_MEDIA_URL }}tiny_mce.js" type="text/javascript"></script> <script src="{{ settings.TINY_MCE_MEDIA_URL }}tiny_mce.js" type="text/javascript"></script>
{% endblock %} {% endblock %}
{% block content %} {% block content %}
<h1>Edit Cave</h1>
<form action="" method="post">{% csrf_token %} <form action="" method="post">{% csrf_token %}
<table>{{ form }}{{caveAndEntranceFormSet}}</table> <table>{{ form }}{{caveAndEntranceFormSet}}</table>
{{ versionControlForm }} {{ versionControlForm }}

View File

@@ -3,6 +3,11 @@
{% block extrahead %} {% block extrahead %}
{% load csrffaker %} {% load csrffaker %}
<script src="{{ settings.TINY_MCE_MEDIA_URL }}tiny_mce.js" type="text/javascript"></script> <script src="{{ settings.TINY_MCE_MEDIA_URL }}tiny_mce.js" type="text/javascript"></script>
<script type="text/javascript">
tinyMCE.init({
mode : "textareas"
});
</script>
{% endblock %} {% endblock %}
{% block body %} {% block body %}
<h1>Edit {{ path }}</h1> <h1>Edit {{ path }}</h1>

View File

@@ -2,19 +2,15 @@
{% load wiki_markup %} {% load wiki_markup %}
{% load link %} {% load link %}
{% block title %}Expedition {{expedition.name}}{% endblock %} {% block title %}Expedition {{this_expedition.name}}{% endblock %}
{% block editLink %}<a href={{expedition.get_admin_url}}>Edit expedition {{expedition|wiki_to_html_short}}</a>{% endblock %} {% block editLink %}<a href={{this_expedition.get_admin_url}}>Edit expedition {{expedition|wiki_to_html_short}}</a>{% endblock %}
{% block related %} {% block related %}
{% endblock %} {% endblock %}
{% block content %} {% block content %}
{% if message %} <h2>{{this_expedition.name}}</h2>
<p>debug message: {{message}}</p>
{% endif %}
<h2>{{expedition.name}}</h2>
<p><b>Other years:</b> <p><b>Other years:</b>
{% for otherexpedition in expeditions %} {% for otherexpedition in expeditions %}
@@ -33,7 +29,7 @@ an "S" for a survey trip. The colours are the same for people on the same trip.
<table class="expeditionpersonlist"> <table class="expeditionpersonlist">
<tr> <tr>
<th>Caver</th> <th>Caver</th>
{% for expeditionday in expedition.expeditionday_set.all %} {% for expeditionday in this_expedition.expeditionday_set.all %}
<th> <th>
{{expeditionday.date.day}} {{expeditionday.date.day}}
</th> </th>
@@ -41,13 +37,13 @@ an "S" for a survey trip. The colours are the same for people on the same trip.
</tr> </tr>
{% for personexpeditionday in personexpeditiondays %} {% for personexpeditionday in personexpeditiondays %}
<tr> <tr>
<td><a href="{{ personexpeditionday.personexpedition.get_absolute_url }}">{{personexpeditionday.personexpedition.person}}</a></td> <td><a href="{{ personexpeditionday.personexpedition.get_absolute_url }}">{{personexpeditionday.personexpedition.person|safe}}</a></td>
{% for persondayactivities in personexpeditionday.personrow %} {% for persondayactivities in personexpeditionday.personrow %}
{% if persondayactivities.persontrips or persondayactivities.survexblocks %} {% if persondayactivities.persontrips or persondayactivities.survexblocks %}
<td class="persondayactivity"> <td class="persondayactivity">
{% for persontrip in persondayactivities.persontrips %} {% for persontrip in persondayactivities.persontrips %}
<a href="{{persontrip.logbook_entry.get_absolute_url}}" class="dayindexlog-{{persontrip.logbook_entry.DayIndex}}">T</a> <a href="{{persontrip.logbook_entry.get_absolute_url}}" class="dayindexlog-1">T</a>
{% endfor %} {% endfor %}
<br/> <br/>
{% for survexblock in persondayactivities.survexblocks %} {% for survexblock in persondayactivities.survexblocks %}
@@ -67,13 +63,13 @@ an "S" for a survey trip. The colours are the same for people on the same trip.
<form action="" method="GET"><input type="submit" name="reload" value="Reload"></form> <form action="" method="GET"><input type="submit" name="reload" value="Reload"></form>
<h3>Logbooks and survey trips per day</h3> <h3>Logbooks and survey trips per day</h3>
<a href="{% url "newLogBookEntry" expeditionyear=expedition.year %}">New logbook entry</a> <a href="{% url "newLogBookEntry" expeditionyear=this_expedition.year %}">New logbook entry</a>
<table class="expeditionlogbooks"> <table class="expeditionlogbooks">
<tr><th>Date</th><th>Logged trips</th><th>Surveys</th></tr> <tr><th>Date</th><th>Logged trips</th><th>Surveys</th></tr>
{% regroup dateditems|dictsort:"date" by date as dates %} {% regroup dateditems|dictsort:"date" by date as dates %}
{% for date in dates %} {% for date in dates %}
<tr> <tr>
<td>{{date.grouper}}</td> <td>{{date.grouper|date:"D d M Y"}}</td>
<td>{% for item in date.list %} <td>{% for item in date.list %}
{% if item.isLogbookEntry %}<a href="{{ item.get_absolute_url }}">{{item.title|safe}}</a><br/>{% endif %} {% if item.isLogbookEntry %}<a href="{{ item.get_absolute_url }}">{{item.title|safe}}</a><br/>{% endif %}
{% endfor %}</td> {% endfor %}</td>

View File

@@ -16,7 +16,7 @@
{% if entry.is_deletion %} {% if entry.is_deletion %}
{{ entry.object_repr }} {{ entry.object_repr }}
{% else %} {% else %}
<a href="admin/{{ entry.get_admin_url }}">{{ entry.object_repr }}</a> <a href="admin/{{ entry.get_admin_url }}/">{{ entry.object_repr }}</a>
{% endif %} {% endif %}
<br/> <br/>
{% if entry.content_type %} {% if entry.content_type %}
@@ -49,17 +49,6 @@ Here you will find information about the {{expedition.objects.count}} expedition
If you are an expedition member, please sign up using the link to the top right and begin editing. If you are an expedition member, please sign up using the link to the top right and begin editing.
</p> </p>
<h3>News</h3>
<p class="indent">
Everyone is gearing up for the 2009 expedition; please see the link below for the main expedition website.
</p>
<h3>Troggle development</h3>
<p class="indent">
Troggle is still under development. Check out the <a href="http://troggle.googlecode.com">development page</a> on google code, where you can file bug reports, make suggestions, and help develop the code. There is also an old todo list at <a href="{%url "todo"%}">here</a>.
</p>
</div>
{% endblock content %} {% endblock content %}
{% block margins %} {% block margins %}

View File

@@ -2,12 +2,12 @@
{% load wiki_markup %} {% load wiki_markup %}
{% block title %}Logbook {{logbookentry.id}}{% endblock %} {% block title %}Logbook {{logbookentry.id}}{% endblock %}
{% block editLink %}<a href={{logbookentry.get_admin_url}}>Edit logbook entry {{logbookentry|wiki_to_html_short}}</a>{% endblock %} {% block editLink %}<a href={{logbookentry.get_admin_url}}/>Edit logbook entry {{logbookentry|wiki_to_html_short}}</a>{% endblock %}
{% block content %} {% block content %}
{% block related %}{% endblock %} {% block related %}{% endblock %}
{% block nav %}{% endblock %} {% block nav %}{% endblock %}
<h2>{{logbookentry.title}}</h2> <h2>{{logbookentry.title|safe}}</h2>
<div id="related"> <div id="related">
<p><a href="{{ logbookentry.expedition.get_absolute_url }}">{{logbookentry.expedition.name}}</a></p> <p><a href="{{ logbookentry.expedition.get_absolute_url }}">{{logbookentry.expedition.name}}</a></p>
@@ -20,10 +20,10 @@
<p> <p>
{% if logbookentry.get_previous_by_date %} {% if logbookentry.get_previous_by_date %}
<a href="{{ logbookentry.get_previous_by_date.get_absolute_url }}">{{logbookentry.get_previous_by_date.date}}</a> <a href="{{ logbookentry.get_previous_by_date.get_absolute_url }}">{{logbookentry.get_previous_by_date.date|date:"D d M Y"}}</a>
{% endif %} {% endif %}
{% if logbookentry.get_next_by_date %} {% if logbookentry.get_next_by_date %}
<a href="{{ logbookentry.get_next_by_date.get_absolute_url }}">{{logbookentry.get_next_by_date.date}}</a> <a href="{{ logbookentry.get_next_by_date.get_absolute_url }}">{{logbookentry.get_next_by_date.date|date:"D d M Y"}}</a>
{% endif %} {% endif %}
</p> </p>
@@ -47,12 +47,12 @@
<td> <td>
{% if persontrip.persontrip_prev %} {% if persontrip.persontrip_prev %}
<a href="{{ persontrip.persontrip_prev.logbook_entry.get_absolute_url }}">{{persontrip.persontrip_prev.logbook_entry.date}}</a> <a href="{{ persontrip.persontrip_prev.logbook_entry.get_absolute_url }}">{{persontrip.persontrip_prev.logbook_entry.date|date:"D d M Y"}}</a>
{% endif %} {% endif %}
</td> </td>
<td> <td>
{% if persontrip.persontrip_next %} {% if persontrip.persontrip_next %}
<a href="{{ persontrip.persontrip_next.logbook_entry.get_absolute_url }}">{{persontrip.persontrip_next.logbook_entry.date}}</a> <a href="{{ persontrip.persontrip_next.logbook_entry.get_absolute_url }}">{{persontrip.persontrip_next.logbook_entry.date|date:"D d M Y"}}</a>
{% endif %} {% endif %}
</td> </td>
@@ -65,9 +65,14 @@
</div> </div>
<div id="col1"> <div id="col1">
<div class="logbookentry"> <div class="logbookentry">
<b>{{logbookentry.date}}</b> <b>{{logbookentry.date|date:"D d M Y"}}</b>
{{logbookentry.text|wiki_to_html}}</div> {% if logbookentry.entry_type == "html" %}
<p>{{logbookentry.text|safe}}</p>
{% else %}
{{logbookentry.text|wiki_to_html}}
{% endif %}
</div>
</div> </div>
</div> </div>

View File

@@ -1,69 +0,0 @@
<html>
<head>
<link rel="stylesheet" type="text/css" href="{{ settings.MEDIA_URL }}/css/cavetables.css">
</head>
<body>
<h2>Caves of loserplateau (locations acording to all.3d)</h2>
<span style="font-size:70%">
Name contains:<br>
<div id="inputf"><input type="text" name="CaveName" id="CaveName" style="width:100%"></div><br>
Depth between (min, max) in meters (0 disables filter):<br>
<div id="inputf"><input type="number" name="CaveDepthMin" id="CaveDepthMin" style="width:45%"> - <input type="number" name="CaveDepthMax" id="CaveDepthMax" style="width:45%"></div><br>
Length between (min, max) in meters (0 disables filter):<br>
<div id="inputf"><input type="number" name="CaveLengthMin" id="CaveLengthMin" style="width:45%"> - <input type="number" name="CaveLengthMax" id="CaveLengthMax" style="width:45%"></div><br>
Last visit after (date in YYYY.MM.DD format works best):<br>
<div id="inputf"><input type="text" name="VisitDate" id="VisitDate" style="width:100%"></div><br>
Last visited by (single word or regular expression, search is not case sensitive):<br>
(e.g. <span id="mono">/da.e/</span> matches both Dave and Dane, <span id="mono">/w..k|ol{2}y/</span> matches either Wook and Olly)<br>
<div id="inputf"><input type="text" name="Visitor" id="Visitor" style="width:100%"></div><br>
Hide incomplete entries:<br>
<div id="inputf"><input type="checkbox" name="Incomplete" id="Incomplete" style="width:100%"></div><br><br>
<button onclick="filterTable('caves_table')">Filter</button><br>
<button onclick="filterTableReset('caves_table')">Reset filters</button><br>
Click on column headers to sort/reverse sort<br><br><br>
</span>
<table id="caves_table">
<tr>
<th onclick="sortTable(0,'caves_table',0)">Cave survex id</th>
<th onclick="sortTable(1,'caves_table',0)">Cave name</th>
<th onclick="sortTable(2,'caves_table',1)">Cave depth</th>
<th onclick="sortTable(3,'caves_table',1)">Cave length</th>
<th onclick="sortTable(4,'caves_table',0)">Last leg date</th>
<th onclick="sortTable(5,'caves_table',0)">Cave location (UTM)</th>
<th onclick="sortTable(6,'caves_table',0)">Cave location (lat/lon)</th>
<th onclick="sortTable(7,'caves_table',1)">Top camp distance [m]</th>
</tr>
{% for cave in caves %}
<tr>
<td><a href={{cave.description}}>{{ cave.name }}</a></td>
<td>{{ cave.title }}</td>
<td>{{ cave.total_depth }}</td>
<td>{{ cave.total_length }}</td>
<td>{{ cave.date }}</td>
<td>33U {{ cave.entrance }}</td>
<td>{{ cave.lat_lon_entrance }}</td>
<td>{{ cave.top_camp_distance}}</td>
</tr>
{% endfor %}
</table>
<script type="text/javascript" src="{{ settings.MEDIA_URL }}/scripts/TableSort.js"></script>
</body>
</html>

View File

@@ -18,8 +18,8 @@
{% if pic.is_mugshot %} {% if pic.is_mugshot %}
<div class="figure"> <div class="figure">
<p> <img src="{{ pic.thumbnail_image.url }}" class="thumbnail" /> <p> <img src="{{ pic.thumbnail_image.url }}" class="thumbnail" />
<p> {{ pic.caption }}</p> <p> {{ pic.caption }} </p>
<p> <a href="{{ pic.get_admin_url }}">edit {{pic}}</a> </> <p> <a href="{{ pic.get_admin_url }}">edit {{pic}}</a>
</p> </p>
</p> </p>
</div> </div>
@@ -32,7 +32,7 @@
<ul> <ul>
{% for personexpedition in person.personexpedition_set.all %} {% for personexpedition in person.personexpedition_set.all %}
<li> <a href="{{ personexpedition.get_absolute_url }}">{{personexpedition.expedition.year}}</a> <li> <a href="{{ personexpedition.get_absolute_url }}">{{personexpedition.expedition.year}}</a>
<span style="padding-left:{{personexpedition.persontrip_set.all|length}}0px; background-color:red"></span> <span style="padding-left:{{ personexpedition.persontrip_set.all|length }}0px; background-color:red"></span>
{{personexpedition.persontrip_set.all|length}} trips {{personexpedition.persontrip_set.all|length}} trips
</li> </li>
{% endfor %} {% endfor %}

View File

@@ -7,7 +7,7 @@
{% block content %} {% block content %}
<h1> <h1>
<a href="{{personexpedition.person.get_absolute_url}}">{{personexpedition.person}}</a> : <a href="{{personexpedition.person.get_absolute_url}}">{{personexpedition.person|safe}}</a> :
<a href="{{personexpedition.expedition.get_absolute_url}}">{{personexpedition.expedition}}</a> <a href="{{personexpedition.expedition.get_absolute_url}}">{{personexpedition.expedition}}</a>
</h1> </h1>

View File

@@ -8,12 +8,12 @@
<h2>Notable expoers</h2> <h2>Notable expoers</h2>
<table class="searchable"> <table class="searchable">
<tr><th>Person</th><th>First</th><th>Last</th><th>Notability</th></tr> <tr><th>Person</th><th>First</th><th>Last</th><th>Notability</th></tr>
{% for person in notablepersons %} {% for person in notablepersons|dictsortreversed:"notability" %}
<tr> <tr>
<td><a href="{{ person.get_absolute_url }}">{{person|wiki_to_html_short}}</a></td> <td><a href="{{ person.get_absolute_url }}">{{person|wiki_to_html_short}}</a></td>
<td><a href="{{ person.first.get_absolute_url }}">{{ person.first.expedition.year }}</a></td> <td><a href="{{ person.first.get_absolute_url }}">{{ person.first.expedition.year }}</a></td>
<td><a href="{{ person.last.get_absolute_url }}">{{ person.last.expedition.year }}</a></td> <td><a href="{{ person.last.get_absolute_url }}">{{ person.last.expedition.year }}</a></td>
<td>{{person.notability}}</td> <td>{{person.notability|floatformat:2}}</td>
</tr> </tr>
{% endfor %} {% endfor %}
</table> </table>
@@ -31,8 +31,8 @@
<tr> <tr>
<td><a href="{{ person.get_absolute_url }}">{{person|wiki_to_html_short}}</a></td> <td><a href="{{ person.get_absolute_url }}">{{person|wiki_to_html_short}}</a></td>
<td><a href="{{ person.first.get_absolute_url }}">{{person.first.expedition.year}}</a></td> <td><a href="{{ person.first.get_absolute_url }}">{{person.first.expedition.year}}</a></td>
<td><a href="{{ person.last.get_absolute_url }}">{{person.last.expedition.year}}</a></td> <td><a href="{{ person.last.get_absolute_url }}">{{person.last.expedition.year}}</a></td>
<td>{{ person.surveyedleglength }}</td> <td></td>
</tr> </tr>
{% endfor %} {% endfor %}
</table> </table>

View File

@@ -4,9 +4,7 @@
{% block title %} QM: {{qm|wiki_to_html_short}} {% endblock %} {% block title %} QM: {{qm|wiki_to_html_short}} {% endblock %}
{% block editLink %}| <a href={{qm.get_admin_url}}>Edit QM {{qm|wiki_to_html_short}}</a>{% endblock %} {% block editLink %}| <a href="{{qm.get_admin_url}}/">Edit QM {{qm|wiki_to_html_short}}</a>{% endblock %}
{% block contentheader %} {% block contentheader %}
<table id="cavepage"> <table id="cavepage">

View File

@@ -5,7 +5,7 @@
{% block title %}CUCC Virtual Survey Binder: {{ current_expedition }}{{ current_survey }}{%endblock%} {% block title %}CUCC Virtual Survey Binder: {{ current_expedition }}{{ current_survey }}{%endblock%}
{% block head %} {% block head %}
<link rel="stylesheet" type="text/css" href="{{ settings.MEDIA_URL }}css/nav.css" /> <link rel="stylesheet" type="text/css" href="{{ MEDIA_URL }}css/nav.css" />
<script language="javascript"> <script language="javascript">
blankColor = "rgb(153, 153, 153)" blankColor = "rgb(153, 153, 153)"
@@ -164,7 +164,7 @@
</p> </p>
</div> </div>
{% endfor %} {% endfor %}
<div class="figure"> <a href="{{ settings.URL_ROOT }}/admin/expo/scannedimage/add/"> <img src="{{ settings.URL_ROOT }}{{ settings.ADMIN_MEDIA_PREFIX }}img/admin/icon_addlink.gif" /> Add a new scanned notes page. </a> </div> <div class="figure"> <a href="{{ URL_ROOT }}/admin/expo/scannedimage/add/"> <img src="{{ URL_ROOT }}{{ ADMIN_MEDIA_PREFIX }}img/admin/icon_addlink.gif" /> Add a new scanned notes page. </a> </div>
</div> </div>
<br class="clearfloat" /> <br class="clearfloat" />
<div id="survexFileContent" class="behind"> survex file editor, keeping file in original structure <br /> <div id="survexFileContent" class="behind"> survex file editor, keeping file in original structure <br />

View File

@@ -41,7 +41,7 @@
<td>{{survexblock.name}}</td> <td>{{survexblock.name}}</td>
<td> <td>
{% if survexblock.expedition %} {% if survexblock.expedition %}
<a href="{{survexblock.expedition.get_absolute_url}}">{{survexblock.date}}</a> <a href="{{survexblock.expedition.get_absolute_url}}">{{survexblock.date|date:"D d M Y"}}</a>
{% else %} {% else %}
{{survexblock.date}} {{survexblock.date}}
{% endif %} {% endif %}

View File

@@ -4,7 +4,7 @@
{% block title %}{{ title }}{% endblock %} {% block title %}{{ title }}{% endblock %}
{% block head %} {% block head %}
<script src="{{ settings.MEDIA_URL }}js/base.js" type="text/javascript"></script> <script src="{{ MEDIA_URL }}js/base.js" type="text/javascript"></script>
<script type="text/javascript" src="{{settings.JSLIB_URL}}jquery-form/jquery.form.min.js"></script> <script type="text/javascript" src="{{settings.JSLIB_URL}}jquery-form/jquery.form.min.js"></script>
<script type="text/javascript" src="{{settings.JSLIB_URL}}codemirror/codemirror.min.js"></script> <script type="text/javascript" src="{{settings.JSLIB_URL}}codemirror/codemirror.min.js"></script>
@@ -46,7 +46,7 @@ $(document).ready(function()
</p> </p>
{% endif %} {% endif %}
<form id="codewikiform" action="" method="POST"> <form id="codewikiform" action="" method="POST">{% csrf_token %}
<div class="codeframebit">{{form.code}}</div> <div class="codeframebit">{{form.code}}</div>
<div style="display:none">{{form.filename}} {{form.dirname}} {{form.datetime}} {{form.outputtype}}</div> <div style="display:none">{{form.filename}} {{form.dirname}} {{form.datetime}} {{form.outputtype}}</div>
<input type="submit" name="diff" value="Diffy" /> <input type="submit" name="diff" value="Diffy" />

View File

@@ -34,6 +34,6 @@ add wikilinks
{% endblock content %} {% endblock content %}
{% block margins %} {% block margins %}
<img class="leftMargin eyeCandy fadeIn" src="{{ settings.MEDIA_URL }}eieshole.jpg"> <img class="leftMargin eyeCandy fadeIn" src="{{ MEDIA_URL }}eieshole.jpg">
<img class="rightMargin eyeCandy fadeIn" src="{{ settings.MEDIA_URL }}goesser.jpg"> <img class="rightMargin eyeCandy fadeIn" src="{{ MEDIA_URL }}goesser.jpg">
{% endblock margins %} {% endblock margins %}

urls.py
View File

@@ -1,17 +1,19 @@
from django.conf.urls import * from django.conf.urls import *
from django.conf import settings from django.conf import settings
from django.conf.urls.static import static
from django.views.static import serve
from core.views import * # flat import from core.views import * # flat import
from core.views_other import * from core.views_other import *
from core.views_caves import * from core.views_caves import *
from core.views_survex import * from core.views_survex import *
from core.models import * from core.models import *
from flatpages.views import *
from django.views.generic.edit import UpdateView from django.views.generic.edit import UpdateView
from django.contrib import admin from django.contrib import admin
from django.views.generic.list import ListView from django.views.generic.list import ListView
from django.contrib import admin from django.contrib import admin
admin.autodiscover() #admin.autodiscover()
# type url probably means it's used. # type url probably means it's used.
@@ -20,29 +22,27 @@ admin.autodiscover()
# <reference to python function in 'core' folder>, # <reference to python function in 'core' folder>,
# <name optional argument for URL reversing (doesn't do much)>) # <name optional argument for URL reversing (doesn't do much)>)
actualurlpatterns = patterns('', actualurlpatterns = [
url(r'^testingurl/?$' , views_caves.millenialcaves, name="testing"),
url(r'^millenialcaves/?$', views_caves.millenialcaves, name="millenialcaves"), url(r'^millenialcaves/?$', views_caves.millenialcaves, name="millenialcaves"),
url(r'^millenialpeople/?$', views_caves.millenialpeople, name="millenialpeople"),
url(r'^cave/descriptionM/([^/]+)/?$', views_caves.millenialdescription),
#url(r'^cave/description/([^/]+)/?$', views_caves.caveDescription),
url(r'^troggle$', views_other.frontpage, name="frontpage"), url(r'^troggle$', views_other.frontpage, name="frontpage"),
url(r'^todo/$', views_other.todo, name="todo"), url(r'^todo/$', views_other.todo, name="todo"),
url(r'^caves/?$', views_caves.caveindex, name="caveindex"), url(r'^caves/?$', views_caves.caveindex, name="caveindex"),
url(r'^people/?$', views_logbooks.personindex, name="personindex"), url(r'^people/?$', views_logbooks.personindex, name="personindex"),
url(r'^newqmnumber/?$', views_other.ajax_QM_number, ), url(r'^newqmnumber/?$', views_other.ajax_QM_number, ),
url(r'^lbo_suggestions/?$', logbook_entry_suggestions), url(r'^lbo_suggestions/?$', logbook_entry_suggestions),
#(r'^person/(?P<person_id>\d*)/?$', views_logbooks.person), #(r'^person/(?P<person_id>\d*)/?$', views_logbooks.person),
url(r'^person/(?P<first_name>[A-Z]*[a-z\-\']*)[^a-zA-Z]*(?P<last_name>[a-z\-\']*[^a-zA-Z]*[A-Z]*[a-z\-]*)/?', views_logbooks.person, name="person"), url(r'^person/(?P<first_name>[A-Z]*[a-z\-\'&;]*)[^a-zA-Z]*(?P<last_name>[a-z\-\']*[^a-zA-Z]*[A-Z]*[a-z\-&;]*)/?', views_logbooks.person, name="person"),
#url(r'^person/(\w+_\w+)$', views_logbooks.person, name="person"), #url(r'^person/(\w+_\w+)$', views_logbooks.person, name="person"),
url(r'^expedition/(\d+)$', views_logbooks.expedition, name="expedition"), url(r'^expedition/(\d+)$', views_logbooks.expedition, name="expedition"),
url(r'^expeditions/?$', views_logbooks.ExpeditionListView.as_view(), name="expeditions"), url(r'^expeditions/?$', views_logbooks.ExpeditionListView.as_view(), name="expeditions"),
url(r'^personexpedition/(?P<first_name>[A-Z]*[a-z]*)[^a-zA-Z]*(?P<last_name>[A-Z]*[a-z]*)/(?P<year>\d+)/?$', views_logbooks.personexpedition, name="personexpedition"), url(r'^personexpedition/(?P<first_name>[A-Z]*[a-z&;]*)[^a-zA-Z]*(?P<last_name>[A-Z]*[a-zA-Z&;]*)/(?P<year>\d+)/?$', views_logbooks.personexpedition, name="personexpedition"),
url(r'^logbookentry/(?P<date>.*)/(?P<slug>.*)/?$', views_logbooks.logbookentry,name="logbookentry"), url(r'^logbookentry/(?P<date>.*)/(?P<slug>.*)/?$', views_logbooks.logbookentry,name="logbookentry"),
url(r'^newlogbookentry/(?P<expeditionyear>.*)$', views_logbooks.newLogbookEntry, name="newLogBookEntry"), url(r'^newlogbookentry/(?P<expeditionyear>.*)$', views_logbooks.newLogbookEntry, name="newLogBookEntry"),
url(r'^editlogbookentry/(?P<expeditionyear>[^/]*)/(?P<pdate>[^/]*)/(?P<pslug>[^/]*)/$', views_logbooks.newLogbookEntry, name="editLogBookEntry"), url(r'^editlogbookentry/(?P<expeditionyear>[^/]*)/(?P<pdate>[^/]*)/(?P<pslug>[^/]*)/$', views_logbooks.newLogbookEntry, name="editLogBookEntry"),
@@ -54,8 +54,8 @@ actualurlpatterns = patterns('',
url(r'^getPeople/(?P<expeditionslug>.*)', views_logbooks.get_people, name = "get_people"), url(r'^getPeople/(?P<expeditionslug>.*)', views_logbooks.get_people, name = "get_people"),
url(r'^getLogBookEntries/(?P<expeditionslug>.*)', views_logbooks.get_logbook_entries, name = "get_logbook_entries"), url(r'^getLogBookEntries/(?P<expeditionslug>.*)', views_logbooks.get_logbook_entries, name = "get_logbook_entries"),
url(r'^cave/new/$', edit_cave, name="newcave"), url(r'^cave/new/$', views_caves.edit_cave, name="newcave"),
url(r'^cave/(?P<cave_id>[^/]+)/?$', views_caves.cave, name="cave"), url(r'^cave/(?P<cave_id>[^/]+)/?$', views_caves.cave, name="cave"),
url(r'^caveslug/([^/]+)/?$', views_caves.caveSlug, name="caveSlug"), url(r'^caveslug/([^/]+)/?$', views_caves.caveSlug, name="caveSlug"),
url(r'^cave/entrance/([^/]+)/?$', views_caves.caveEntrance), url(r'^cave/entrance/([^/]+)/?$', views_caves.caveEntrance),
@@ -73,100 +73,91 @@ actualurlpatterns = patterns('',
# url(r'^jgtuploadfile$', view_surveys.jgtuploadfile, name="jgtuploadfile"), # url(r'^jgtuploadfile$', view_surveys.jgtuploadfile, name="jgtuploadfile"),
url(r'^cave/(?P<cave_id>[^/]+)/?(?P<ent_letter>[^/])$', ent), url(r'^cave/(?P<cave_id>[^/]+)/?(?P<ent_letter>[^/])$', ent),
url(r'^cave/(?P<slug>[^/]+)/edit/$', edit_cave, name="edit_cave"), url(r'^cave/(?P<slug>[^/]+)/edit/$', views_caves.edit_cave, name="edit_cave"),
#(r'^cavesearch', caveSearch), #(r'^cavesearch', caveSearch),
# url(r'^cave/(?P<cave_id>[^/]+)/(?P<year>\d\d\d\d)-(?P<qm_id>\d*)(?P<grade>[ABCDX]?)?$', views_caves.qm, name="qm"),
url(r'^cave/(?P<cave_id>[^/]+)/(?P<year>\d\d\d\d)-(?P<qm_id>\d*)(?P<grade>[ABCDX]?)?$', views_caves.qm, name="qm"), url(r'^cave/qm/(?P<qm_id>[^/]+)?$', views_caves.qm, name="qm"),
url(r'^prospecting_guide/$', views_caves.prospecting), url(r'^prospecting_guide/$', views_caves.prospecting),
url(r'^logbooksearch/(.*)/?$', views_logbooks.logbookSearch), url(r'^logbooksearch/(.*)/?$', views_logbooks.logbookSearch),
url(r'^statistics/?$', views_other.stats, name="stats"), url(r'^statistics/?$', views_other.stats, name="stats"),
url(r'^survey/?$', surveyindex, name="survey"), url(r'^survey/?$', surveyindex, name="survey"),
url(r'^survey/(?P<year>\d\d\d\d)\#(?P<wallet_number>\d*)$', survey, name="survey"), url(r'^survey/(?P<year>\d\d\d\d)\#(?P<wallet_number>\d*)$', survey, name="survey"),
url(r'^controlpanel/?$', views_other.controlPanel, name="controlpanel"), url(r'^controlpanel/?$', views_other.controlPanel, name="controlpanel"),
url(r'^CAVETAB2\.CSV/?$', views_other.downloadCavetab, name="downloadcavetab"), url(r'^CAVETAB2\.CSV/?$', views_other.downloadCavetab, name="downloadcavetab"),
url(r'^Surveys\.csv/?$', views_other.downloadSurveys, name="downloadsurveys"), url(r'^Surveys\.csv/?$', views_other.downloadSurveys, name="downloadsurveys"),
url(r'^logbook(?P<year>\d\d\d\d)\.(?P<extension>.*)/?$',views_other.downloadLogbook), url(r'^logbook(?P<year>\d\d\d\d)\.(?P<extension>.*)/?$',views_other.downloadLogbook),
url(r'^logbook/?$',views_other.downloadLogbook, name="downloadlogbook"), url(r'^logbook/?$',views_other.downloadLogbook, name="downloadlogbook"),
url(r'^cave/(?P<cave_id>[^/]+)/qm\.csv/?$', views_other.downloadQMs, name="downloadqms"), url(r'^cave/(?P<cave_id>[^/]+)/qm\.csv/?$', views_other.downloadQMs, name="downloadqms"),
(r'^downloadqms$', views_other.downloadQMs), url(r'^downloadqms$', views_other.downloadQMs),
url(r'^eyecandy$', views_other.eyecandy), url(r'^eyecandy$', views_other.eyecandy),
(r'^admin/doc/?', include('django.contrib.admindocs.urls')), url(r'^admin/doc/?', include('django.contrib.admindocs.urls')),
#url(r'^admin/(.*)', admin.site.get_urls, name="admin"), #url(r'^admin/(.*)', admin.site.get_urls, name="admin"),
(r'^admin/', include(admin.site.urls)), url(r'^admin/', include(admin.site.urls)),
# don't know why this needs troggle/ in here. nice to get it out # don't know why this needs troggle/ in here. nice to get it out
url(r'^troggle/media-admin/(?P<path>.*)$', 'django.views.static.serve', {'document_root': settings.MEDIA_ADMIN_DIR, 'show_indexes':True}), # url(r'^troggle/media-admin/(?P<path>.*)$', static, {'document_root': settings.MEDIA_ADMIN_DIR, 'show_indexes':True}),
(r'^accounts/', include('registration.backends.default.urls')), url(r'^accounts/', include('registration.backends.default.urls')),
(r'^profiles/', include('profiles.urls')), url(r'^profiles/', include('profiles.urls')),
# (r'^personform/(.*)$', personForm), # (r'^personform/(.*)$', personForm),
(r'^site_media/(?P<path>.*)$', 'django.views.static.serve', url(r'^site_media/(?P<path>.*)$', serve, {'document_root': settings.MEDIA_ROOT, 'show_indexes': True}),
{'document_root': settings.MEDIA_ROOT, 'show_indexes': True}),
(r'^tinymce_media/(?P<path>.*)$', 'django.views.static.serve',
{'document_root': settings.TINY_MCE_MEDIA_ROOT, 'show_indexes': True}),
url(r'^survexblock/(.+)$', views_caves.survexblock, name="survexblock"), url(r'^survexblock/(.+)$', views_caves.survexblock, name="survexblock"),
url(r'^survexfile/(?P<survex_file>.*?)\.svx$', views_survex.svx, name="svx"), url(r'^survexfile/(?P<survex_file>.*?)\.svx$', views_survex.svx, name="svx"),
url(r'^survexfile/(?P<survex_file>.*?)\.3d$', views_survex.threed, name="threed"), url(r'^survexfile/(?P<survex_file>.*?)\.3d$', views_survex.threed, name="threed"),
url(r'^survexfile/(?P<survex_file>.*?)\.log$', views_survex.svxraw), url(r'^survexfile/(?P<survex_file>.*?)\.log$', views_survex.svxraw),
url(r'^survexfile/(?P<survex_file>.*?)\.err$', views_survex.err), url(r'^survexfile/(?P<survex_file>.*?)\.err$', views_survex.err),
url(r'^survexfile/caves/$', views_survex.survexcaveslist, name="survexcaveslist"), url(r'^survexfile/caves/$', views_survex.survexcaveslist, name="survexcaveslist"),
url(r'^survexfile/caves/(?P<survex_cave>.*)$', views_survex.survexcavesingle, name="survexcavessingle"), url(r'^survexfile/caves/(?P<survex_cave>.*)$', views_survex.survexcavesingle, name="survexcavessingle"),
url(r'^survexfileraw/(?P<survex_file>.*?)\.svx$', views_survex.svxraw, name="svxraw"), url(r'^survexfileraw/(?P<survex_file>.*?)\.svx$', views_survex.svxraw, name="svxraw"),
(r'^survey_files/listdir/(?P<path>.*)$', view_surveys.listdir), url(r'^survey_files/listdir/(?P<path>.*)$', view_surveys.listdir),
(r'^survey_files/download/(?P<path>.*)$', view_surveys.download), url(r'^survey_files/download/(?P<path>.*)$', view_surveys.download),
#(r'^survey_files/upload/(?P<path>.*)$', view_surveys.upload), #(r'^survey_files/upload/(?P<path>.*)$', view_surveys.upload),
#(r'^survey_scans/(?P<path>.*)$', 'django.views.static.serve', {'document_root': settings.SURVEY_SCANS, 'show_indexes':True}), #(r'^survey_scans/(?P<path>.*)$', 'django.views.static.serve', {'document_root': settings.SURVEY_SCANS, 'show_indexes':True}),
url(r'^survey_scans/$', view_surveys.surveyscansfolders, name="surveyscansfolders"), url(r'^survey_scans/$', view_surveys.surveyscansfolders, name="surveyscansfolders"),
url(r'^survey_scans/(?P<path>[^/]+)/$', view_surveys.surveyscansfolder, name="surveyscansfolder"), url(r'^survey_scans/(?P<path>[^/]+)/$', view_surveys.surveyscansfolder, name="surveyscansfolder"),
url(r'^survey_scans/(?P<path>[^/]+)/(?P<file>[^/]+(?:png|jpg|jpeg))$', url(r'^survey_scans/(?P<path>[^/]+)/(?P<file>[^/]+(?:png|jpg|jpeg))$',
view_surveys.surveyscansingle, name="surveyscansingle"), view_surveys.surveyscansingle, name="surveyscansingle"),
url(r'^tunneldata/$', view_surveys.tunneldata, name="tunneldata"), url(r'^tunneldata/$', view_surveys.tunneldata, name="tunneldata"),
url(r'^tunneldataraw/(?P<path>.+?\.xml)$', view_surveys.tunnelfile, name="tunnelfile"), url(r'^tunneldataraw/(?P<path>.+?\.xml)$', view_surveys.tunnelfile, name="tunnelfile"),
url(r'^tunneldataraw/(?P<path>.+?\.xml)/upload$',view_surveys.tunnelfileupload, name="tunnelfileupload"), url(r'^tunneldataraw/(?P<path>.+?\.xml)/upload$',view_surveys.tunnelfileupload, name="tunnelfileupload"),
#url(r'^tunneldatainfo/(?P<path>.+?\.xml)$', view_surveys.tunnelfileinfo, name="tunnelfileinfo"), #url(r'^tunneldatainfo/(?P<path>.+?\.xml)$', view_surveys.tunnelfileinfo, name="tunnelfileinfo"),
(r'^photos/(?P<path>.*)$', 'django.views.static.serve', # url(r'^photos/(?P<path>.*)$', 'django.views.static.serve', {'document_root': settings.PHOTOS_ROOT, 'show_indexes':True}),
{'document_root': settings.PHOTOS_ROOT, 'show_indexes':True}),
url(r'^prospecting/(?P<name>[^.]+).png$', prospecting_image, name="prospecting_image"), url(r'^prospecting/(?P<name>[^.]+).png$', prospecting_image, name="prospecting_image"),
# (r'^gallery/(?P<path>.*)$', 'django.views.static.serve', # (r'^gallery/(?P<path>.*)$', 'django.views.static.serve', {'document_root': settings.PHOTOS_ROOT, 'show_indexes':True}),
# {'document_root': settings.PHOTOS_ROOT, 'show_indexes':True}),
# for those silly ideas # for those silly ideas
url(r'^experimental.*$', views_logbooks.experimental, name="experimental"), url(r'^experimental.*$', views_logbooks.experimental, name="experimental"),
#url(r'^trip_report/?$',views_other.tripreport,name="trip_report") #url(r'^trip_report/?$',views_other.tripreport,name="trip_report")
url(r'^(.*)_edit$', 'flatpages.views.editflatpage', name="editflatpage"), url(r'^(.*)_edit$', editflatpage, name="editflatpage"),
url(r'^(.*)$', 'flatpages.views.flatpage', name="flatpage"), url(r'^(.*)$', flatpage, name="flatpage"),
) ]
#Allow prefix to all urls #Allow prefix to all urls
urlpatterns = patterns ('', urlpatterns = [
('^%s' % settings.DIR_ROOT, include(actualurlpatterns)) url('^%s' % settings.DIR_ROOT, include(actualurlpatterns))
) ] + static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
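The tail of the new urls.py serves site media through django.views.static.serve and appends static() for collected static files, replacing the old string-based 'django.views.static.serve' references. Condensed into a minimal sketch (development use only; static() yields nothing when DEBUG is off):

from django.conf import settings
from django.conf.urls import url
from django.conf.urls.static import static
from django.views.static import serve

urlpatterns = [
    # Serve uploaded media through Django itself; fine for development,
    # but a real deployment should let the web server handle these paths.
    url(r'^site_media/(?P<path>.*)$', serve,
        {'document_root': settings.MEDIA_ROOT, 'show_indexes': True}),
] + static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)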

View File

@@ -1,4 +1,5 @@
from django.conf import settings from django.conf import settings
from django.shortcuts import render
import random, re, logging import random, re, logging
from troggle.core.models import CaveDescription from troggle.core.models import CaveDescription
@@ -22,12 +23,12 @@ def randomLogbookSentence():
#Choose again if there are no sentances (this happens if it is a placeholder entry) #Choose again if there are no sentances (this happens if it is a placeholder entry)
while len(re.findall('[A-Z].*?\.',randSent['entry'].text))==0: while len(re.findall('[A-Z].*?\.',randSent['entry'].text))==0:
randSent['entry']=LogbookEntry.objects.order_by('?')[0] randSent['entry']=LogbookEntry.objects.order_by('?')[0]
#Choose a random sentence from that entry. Store the sentence as randSent['sentence'], and the number of that sentence in the entry as randSent['number'] #Choose a random sentence from that entry. Store the sentence as randSent['sentence'], and the number of that sentence in the entry as randSent['number']
sentenceList=re.findall('[A-Z].*?\.',randSent['entry'].text) sentenceList=re.findall('[A-Z].*?\.',randSent['entry'].text)
randSent['number']=random.randrange(0,len(sentenceList)) randSent['number']=random.randrange(0,len(sentenceList))
randSent['sentence']=sentenceList[randSent['number']] randSent['sentence']=sentenceList[randSent['number']]
return randSent return randSent
@@ -36,22 +37,22 @@ def save_carefully(objectType, lookupAttribs={}, nonLookupAttribs={}):
-if instance does not exist in DB: add instance to DB, return (new instance, True) -if instance does not exist in DB: add instance to DB, return (new instance, True)
-if instance exists in DB and was modified using Troggle: do nothing, return (existing instance, False) -if instance exists in DB and was modified using Troggle: do nothing, return (existing instance, False)
-if instance exists in DB and was not modified using Troggle: overwrite instance, return (instance, False) -if instance exists in DB and was not modified using Troggle: overwrite instance, return (instance, False)
The checking is accomplished using Django's get_or_create and the new_since_parsing boolean field The checking is accomplished using Django's get_or_create and the new_since_parsing boolean field
defined in core.models.TroggleModel. defined in core.models.TroggleModel.
""" """
instance, created=objectType.objects.get_or_create(defaults=nonLookupAttribs, **lookupAttribs) instance, created=objectType.objects.get_or_create(defaults=nonLookupAttribs, **lookupAttribs)
if not created and not instance.new_since_parsing: if not created and not instance.new_since_parsing:
for k, v in nonLookupAttribs.items(): #overwrite the existing attributes from the logbook text (except date and title) for k, v in list(nonLookupAttribs.items()): #overwrite the existing attributes from the logbook text (except date and title)
setattr(instance, k, v) setattr(instance, k, v)
instance.save() instance.save()
if created: if created:
logging.info(str(instance) + ' was just added to the database for the first time. \n') logging.info(str(instance) + ' was just added to the database for the first time. \n')
if not created and instance.new_since_parsing: if not created and instance.new_since_parsing:
logging.info(str(instance) + " has been modified using Troggle, so the current script left it as is. \n") logging.info(str(instance) + " has been modified using Troggle, so the current script left it as is. \n")
@@ -59,21 +60,6 @@ def save_carefully(objectType, lookupAttribs={}, nonLookupAttribs={}):
logging.info(str(instance) + " existed in the database unchanged since last parse. It was overwritten by the current script. \n") logging.info(str(instance) + " existed in the database unchanged since last parse. It was overwritten by the current script. \n")
return (instance, created) return (instance, created)
def render_with_context(req, *args, **kwargs):
"""this is the snippet from http://www.djangosnippets.org/snippets/3/
Django uses Context, not RequestContext when you call render_to_response.
We always want to use RequestContext, so that django adds the context from
settings.TEMPLATE_CONTEXT_PROCESSORS. This way we automatically get
necessary settings variables passed to each template. So we use a custom
method, render_response instead of render_to_response. Hopefully future
Django releases will make this unnecessary."""
from django.shortcuts import render_to_response
from django.template import RequestContext
kwargs['context_instance'] = RequestContext(req)
return render_to_response(*args, **kwargs)
re_body = re.compile(r"\<body[^>]*\>(.*)\</body\>", re.DOTALL) re_body = re.compile(r"\<body[^>]*\>(.*)\</body\>", re.DOTALL)
re_title = re.compile(r"\<title[^>]*\>(.*)\</title\>", re.DOTALL) re_title = re.compile(r"\<title[^>]*\>(.*)\</title\>", re.DOTALL)
@@ -94,7 +80,7 @@ def get_single_match(regex, text):
def href_to_wikilinks(matchobj): def href_to_wikilinks(matchobj):
""" """
Given an html link, checks for possible valid wikilinks. Given an html link, checks for possible valid wikilinks.
Returns the first valid wikilink. Valid means the target Returns the first valid wikilink. Valid means the target
object actually exists. object actually exists.
""" """
@@ -105,7 +91,7 @@ def href_to_wikilinks(matchobj):
return matchobj.group() return matchobj.group()
#except: #except:
#print 'fail' #print 'fail'
re_subs = [(re.compile(r"\<b[^>]*\>(.*?)\</b\>", re.DOTALL), r"'''\1'''"), re_subs = [(re.compile(r"\<b[^>]*\>(.*?)\</b\>", re.DOTALL), r"'''\1'''"),
(re.compile(r"\<i\>(.*?)\</i\>", re.DOTALL), r"''\1''"), (re.compile(r"\<i\>(.*?)\</i\>", re.DOTALL), r"''\1''"),
@@ -121,12 +107,12 @@ re_subs = [(re.compile(r"\<b[^>]*\>(.*?)\</b\>", re.DOTALL), r"'''\1'''"),
(re.compile(r"\<a\s+href=['\"]#([^'\"]*)['\"]\s*\>(.*?)\</a\>", re.DOTALL), r"[[cavedescription:\1|\2]]"), #assumes that all links with target ids are cave descriptions. Not great. (re.compile(r"\<a\s+href=['\"]#([^'\"]*)['\"]\s*\>(.*?)\</a\>", re.DOTALL), r"[[cavedescription:\1|\2]]"), #assumes that all links with target ids are cave descriptions. Not great.
(re.compile(r"\[\<a\s+href=['\"][^'\"]*['\"]\s+id=['\"][^'\"]*['\"]\s*\>([^\s]*).*?\</a\>\]", re.DOTALL), r"[[qm:\1]]"), (re.compile(r"\[\<a\s+href=['\"][^'\"]*['\"]\s+id=['\"][^'\"]*['\"]\s*\>([^\s]*).*?\</a\>\]", re.DOTALL), r"[[qm:\1]]"),
(re.compile(r'<a\shref="?(?P<target>.*)"?>(?P<text>.*)</a>'),href_to_wikilinks), (re.compile(r'<a\shref="?(?P<target>.*)"?>(?P<text>.*)</a>'),href_to_wikilinks),
] ]
def html_to_wiki(text, codec = "utf-8"): def html_to_wiki(text, codec = "utf-8"):
if type(text) == str: if type(text) == str:
text = unicode(text, codec) text = str(text, codec)
text = re.sub("</p>", r"", text) text = re.sub("</p>", r"", text)
text = re.sub("<p>$", r"", text) text = re.sub("<p>$", r"", text)
text = re.sub("<p>", r"\n\n", text) text = re.sub("<p>", r"\n\n", text)