forked from expo/troggle
Compare commits
651 commits: django-upg...old-master
.gitignore (vendored) — new file, 36 lines
@@ -0,0 +1,36 @@
+# use glob syntax
+syntax: glob
+
+*.pyc
+db*
+localsettings.py
+*~
+parsing_log.txt
+troggle
+troggle_log.txt
+.idea/*
+*.orig
+media/images/*
+.vscode/*
+.swp
+imagekit-off/
+localsettings-expo-live.py
+.gitignore
+desktop.ini
+troggle-reset.log
+troggle-reset0.log
+troggle-surveys.log
+troggle.log
+troggle.sqlite
+troggle.sqlite.0
+troggle.sqlite.1
+my_project.dot
+memdump.sql
+troggle-sqlite.sql
+import_profile.json
+import_times.json
+ignored-files.log
+tunnel-import.log
+posnotfound
+troggle.sqlite-journal
+loadsurvexblks.log
.hgignore — deleted file, 16 lines
@@ -1,16 +0,0 @@
-# use glob syntax
-syntax: glob
-
-*.pyc
-db*
-localsettings.py
-*~
-parsing_log.txt
-troggle
-troggle_log.txt
-.idea/*
-*.orig
-media/images/*
-.vscode/*
-.swp
-imagekit-off/
README.txt — 114 lines changed
@@ -1,6 +1,6 @@
 Troggle is an application for caving expedition data management, originally created for use on Cambridge University Caving Club (CUCC)expeditions and licensed under the GNU Lesser General Public License.
 
-Troggle has been forked into two projects. The original one is maintained by Aron Curtis and is used for Erebus caves. The CUCC variant uses files as the definitive data, not the database and lives at expo.sruvex.com/troggle.
+Troggle has been forked into two projects. The original one is maintained by Aron Curtis and is used for Erebus caves. The CUCC variant uses files as the definitive data, not the database and lives at expo.survex.com/troggle.
 
 Troggle setup
 ==========
@@ -8,39 +8,135 @@ Troggle setup
 Python, Django, and Database setup
 -----------------------------------
 Troggle requires Django 1.4 or greater, and any version of Python that works with it.
+It is currently (Feb.2020) on django 1.7.11 (1.7.11-1+deb8u5).
 Install Django with the following command:
 
-apt-get install python-django (on debian/ubuntu)
+sudo apt install python-django (on debian/ubuntu) -- does not work now as we need specific version
 
-If you want to use MySQL or Postgresql, download and install them. However, you can also use Django with Sqlite3, which is included in Python and thus requires no extra installation.
+requirements.txt:
+Django==1.7.11
+django-registration==2.1.2
+mysql
+#imagekit
+#django-imagekit
+Image
+django-tinymce==2.7.0
+smartencoding
+unidecode
+
+Install like this:
+sudo apt install pip # does not work on Ubuntu 20.04 for python 2.7. Have to install from source. Use 18.04
+pip install django==1.7
+pip install django-tinymce==2.0.1
+sudo apt install libfreetype6-dev
+pip install django-registration==2.0
+pip install unidecode
+pip install --no-cache-dir pillow==2.7.0 # fails horribly on installing Ubuntu 20.04
+pip install --no-cache-dir pillow # installs on Ubuntu 20.04 , don't know if it works though
+
+If you want to use MySQL or Postgresql, download and install them.
+However, you can also use Django with Sqlite3, which is included in Python and thus requires no extra installation.
+pip install pygraphviz
+apt install survex
+
+pip install django-extensions
+pip install pygraphviz # fails to install
+pip install pyparsing pydot # installs fine
+django extension graph_models # https://django-extensions.readthedocs.io/en/latest/graph_models.html
+
+Or use a python3 virtual environment: (python3.5 not later)
+$ cd troggle
+$ cd ..
+$ python3.5 -m venv pyth35d2
+(creates folder with virtual env)
+cd pyth35d2
+bin/activate
+(now install everything - not working yet..)
+$ pip install -r requirements.txt
+
+MariaDB database
+----------------
+Start it up with
+$ sudo mysql -u -p
+when it will prompt you to type in the password. Get this by reading the settings.py file in use on the server.
+then
+> CREATE DATABASE troggle;
+> use troggle;
+> exit;
+
+Note the semicolons.
+
+You can check the status of the db service:
+$ sudo systemctl status mysql
+
+You can start and stop the db service with
+$ sudo systemctl restart mysql.service
+$ sudo systemctl stop mysql.service
+$ sudo systemctl start mysql.service
 
 Troggle itself
 -------------
-Choose a directory where you will keep troggle, and svn check out Troggle into it using the following command:
+Choose a directory where you will keep troggle, and git clone Troggle into it using the following command:
 
-svn co http://troggle.googlecode.com/svn/
+git clone git://expo.survex.com/troggle
+or more reliably
+git clone ssh://expo@expo.survex.com/home/expo/troggle
 
 
-If you want to work on the source code and be able to commit, you will need to use https instead of http, and your google account will need to be added to the troggle project members list. Contact aaron dot curtis at cantab dot net to get this set up.
+If you want to work on the source code and be able to commit, your account will need to be added to the troggle project members list. Contact wookey at wookware dot org to get this set up.
 
 Next, you need to fill in your local settings. Copy either localsettingsubuntu.py or localsettingsserver.py to a new file called localsettings.py. Follow the instructions contained in the file to fill out your settings.
 
 Setting up survex
 -----------------
 You need to have survex installed as the command line 'cavern' is used as part of the survex
 import process.
 
 Setting up tables and importing legacy data
 ------------------------------------------
-Run "python databaseReset.py reset" from the troggle directory.
+Run "sudo python databaseReset.py reset" from the troggle directory.
 
 
 Once troggle is running, you can also log in and then go to "Import / export" data under "admin" on the menu.
 
 Adding a new year/expedition requires adding a column to the
-noinfo/folk.csv table - a year doesn't exist until that is done.
+folk/folk.csv table - a year doesn't exist until that is done.
 
 
 Running a Troggle server
 ------------------------
-For high volume use, Troggle should be run using a web server like apache. However, a quick way to get started is to use the development server built into Django.
+For high volume use, Troggle should be run using a web server like apache. However, a quick way to get started is to use the development server built into Django. This is limited though: directory
+redirection needs apache.
 
 To do this, run "python manage.py runserver" from the troggle directory.
 
+
+Running a Troggle server with Apache
+------------------------------------
+Troggle also needs these aliases to be configured. These are set in
+/home/expo/config/apache/expo.conf
+on the expo server.
+
+At least these need setting:
+DocumentRoot /home/expo/expoweb
+WSGIScriptAlias / /home/expo/troggle/wsgi.py
+<Directory /home/expo/troggle>
+<Files wsgi.py>
+Require all granted
+</Files>
+</Directory>
+
+Alias /expofiles /home/expo/expofiles
+Alias /photos /home/expo/webphotos
+Alias /map /home/expo/expoweb/map
+Alias /javascript /usr/share/javascript
+Alias /static/ /home/expo/static/
+ScriptAlias /repositories /home/expo/config/apache/services/hgweb/hgweb.cgi
+
+(The last is just for mercurial which will be remoived during 2020).
+
+Unlike the "runserver" method, apache requires a restart before it will use
+any changed files:
+
+apache2ctl stop
+apache2ctl start
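Note: the README above says to copy localsettingsubuntu.py or localsettingsserver.py to localsettings.py. As a rough sketch only (the real template files are authoritative, and all values below are placeholders), a minimal localsettings.py for the sqlite case could look like this, using setting names that appear elsewhere in this diff:

# localsettings.py -- minimal sketch, values are placeholders
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',   # no MySQL/Postgres needed
        'NAME': 'troggle.sqlite',                 # matches the file ignored in .gitignore above
    }
}
EXPOUSER = 'expo'                     # admin account created on database reset
EXPOUSERPASS = 'change-me'            # placeholder
EXPOUSER_EMAIL = 'expo@example.org'   # placeholder
LOGFILE = 'troggle.log'               # matches the file ignored in .gitignore above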
README/index.html — new file, 27 lines
@@ -0,0 +1,27 @@
+<!DOCTYPE html>
+<html>
+<head>
+<meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
+<title>Troggle - Coding Documentation</title>
+<link rel="stylesheet" type="text/css" href="..media/css/main2.css" />
+</head>
+<body>
+<h1>Troggle Code - README</h1>
+<h2>Contents of README.txt file</h2>
+
+<iframe name="erriframe" width="90%" height="45%"
+src="../readme.txt" frameborder="1" ></iframe>
+
+<h2>Troggle documentation in the Expo Handbook</h2>
+<ul>
+<li><a href="http://expo.survex.com/handbook/troggle/trogintro.html">Intro</a>
+<li><a href="http://expo.survex.com/handbook/troggle/trogmanual.html">Troggle manual</a>
+<li><a href="http://expo.survex.com/handbook/troggle/trogarch.html">Troggle data model</a>
+<li><a href="http://expo.survex.com/handbook/troggle/trogimport.html">Troggle importing data</a>
+<li><a href="http://expo.survex.com/handbook/troggle/trogdesign.html">Troggle design decisions</a>
+<li><a href="http://expo.survex.com/handbook/troggle/trogdesignx.html">Troggle future architectures</a>
+<li><a href="http://expo.survex.com/handbook/troggle/trogsimpler.html">a kinder simpler Troggle?</a>
+
+</ul>
+<hr />
+</body></html>
core/admin.py
@@ -50,10 +50,10 @@ class QMsFoundInline(admin.TabularInline):
     extra=1
 
 
-class PhotoInline(admin.TabularInline):
-    model = DPhoto
-    exclude = ['is_mugshot' ]
-    extra = 1
+# class PhotoInline(admin.TabularInline):
+#    model = DPhoto
+#    exclude = ['is_mugshot' ]
+#    extra = 1
 
 
 class PersonTripInline(admin.TabularInline):
@@ -67,18 +67,19 @@ class LogbookEntryAdmin(TroggleModelAdmin):
     prepopulated_fields = {'slug':("title",)}
     search_fields = ('title','expedition__year')
     date_heirarchy = ('date')
-    inlines = (PersonTripInline, PhotoInline, QMsFoundInline)
+    # inlines = (PersonTripInline, PhotoInline, QMsFoundInline)
+    inlines = (PersonTripInline, QMsFoundInline)
     class Media:
         css = {
             "all": ("css/troggleadmin.css",)
         }
     actions=('export_logbook_entries_as_html','export_logbook_entries_as_txt')
 
-    def export_logbook_entries_as_html(modeladmin, request, queryset):
+    def export_logbook_entries_as_html(self, modeladmin, request, queryset):
         response=downloadLogbook(request=request, queryset=queryset, extension='html')
         return response
 
-    def export_logbook_entries_as_txt(modeladmin, request, queryset):
+    def export_logbook_entries_as_txt(self, modeladmin, request, queryset):
         response=downloadLogbook(request=request, queryset=queryset, extension='txt')
         return response
 
@@ -116,7 +117,7 @@ class EntranceAdmin(TroggleModelAdmin):
     search_fields = ('caveandentrance__cave__kataster_number',)
 
 
-admin.site.register(DPhoto)
+#admin.site.register(DPhoto)
 admin.site.register(Cave, CaveAdmin)
 admin.site.register(Area)
 #admin.site.register(OtherCaveName)
@@ -139,16 +140,17 @@ admin.site.register(SurvexStation)
 admin.site.register(SurvexScansFolder)
 admin.site.register(SurvexScanSingle)
 
+admin.site.register(DataIssue)
 
 def export_as_json(modeladmin, request, queryset):
-    response = HttpResponse(mimetype="text/json")
+    response = HttpResponse(content_type="text/json")
     response['Content-Disposition'] = 'attachment; filename=troggle_output.json'
     serializers.serialize("json", queryset, stream=response)
     return response
 
 
 def export_as_xml(modeladmin, request, queryset):
-    response = HttpResponse(mimetype="text/xml")
+    response = HttpResponse(content_type="text/xml")
     response['Content-Disposition'] = 'attachment; filename=troggle_output.xml'
     serializers.serialize("xml", queryset, stream=response)
     return response
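Note: the mimetype keyword seen above was removed from HttpResponse in Django 1.7, which is why these functions now pass content_type. Module-level actions like export_as_json are normally made available site-wide; a minimal sketch using the standard admin API (illustrative, not code from this repository):

from django.contrib import admin
from django.core import serializers
from django.http import HttpResponse

def export_as_json(modeladmin, request, queryset):
    # Django >= 1.7: content_type= replaces the removed mimetype= argument
    response = HttpResponse(content_type="text/json")
    response['Content-Disposition'] = 'attachment; filename=troggle_output.json'
    serializers.serialize("json", queryset, stream=response)
    return response

admin.site.add_action(export_as_json, 'export_as_json')  # offer it on every change list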
core/forms.py
@@ -46,12 +46,12 @@ class EntranceForm(ModelForm):
     #underground_centre_line = forms.CharField(required = False, widget=TinyMCE(attrs={'cols': 80, 'rows': 10}))
     #notes = forms.CharField(required = False, widget=TinyMCE(attrs={'cols': 80, 'rows': 10}))
     #references = forms.CharField(required = False, widget=TinyMCE(attrs={'cols': 80, 'rows': 10}))
-    other_station = forms.CharField(required=False) # Trying to change this to a singl;e line entry
-    tag_station = forms.CharField(required=False) # Trying to change this to a singl;e line entry
-    exact_station = forms.CharField(required=False) # Trying to change this to a singl;e line entry
-    northing = forms.CharField(required=False) # Trying to change this to a singl;e line entry
-    easting = forms.CharField(required=False) # Trying to change this to a singl;e line entry
-    alt = forms.CharField(required=False) # Trying to change this to a singl;e line entry
+    other_station = forms.CharField(required=False) # Trying to change this to a single line entry
+    tag_station = forms.CharField(required=False) # Trying to change this to a single line entry
+    exact_station = forms.CharField(required=False) # Trying to change this to a single line entry
+    northing = forms.CharField(required=False) # Trying to change this to a single line entry
+    easting = forms.CharField(required=False) # Trying to change this to a single line entry
+    alt = forms.CharField(required=False) # Trying to change this to a single line entry
     class Meta:
         model = Entrance
         exclude = ("cached_primary_slug", "filename",)
@@ -123,7 +123,7 @@ def getTripForm(expedition):
     html = forms.CharField(widget=TinyMCE(attrs={'cols': 80, 'rows': 30}))
 
     def clean(self):
-        print dir(self)
+        print(dir(self))
         if self.cleaned_data.get("caveOrLocation") == "cave" and not self.cleaned_data.get("cave"):
             self._errors["cave"] = self.error_class(["This field is required"])
         if self.cleaned_data.get("caveOrLocation") == "location" and not self.cleaned_data.get("location"):
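Note: the print dir(self) -> print(dir(self)) fix above, like the other print fixes throughout this diff, converts a Python 2 print statement to the function form in preparation for Python 3. On Python 2.7 the parenthesised form is only fully equivalent with the __future__ import (which views_caves.py, later in this diff, adds):

from __future__ import print_function  # Python 2.7: make print a function

print("hello", "world")  # "hello world" on Python 2 and 3; a tuple without the import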
core/management/commands/reset_db.py
@@ -1,182 +1,33 @@
-from django.core.management.base import BaseCommand, CommandError
-import os
-from optparse import make_option
-from troggle.core.models import Cave
 
+from django.db import connection
+from django.core import management
+from django.core.urlresolvers import reverse
+from django.core.management.base import BaseCommand, CommandError
+from django.contrib.auth.models import User
+
+from troggle.core.models import Cave, Entrance
+import troggle.flatpages.models
+
+import settings
+
+databasename=settings.DATABASES['default']['NAME']
+expouser=settings.EXPOUSER
+expouserpass=settings.EXPOUSERPASS
+expouseremail=settings.EXPOUSER_EMAIL
+"""Pretty much all of this is now replaced by databaseRest.py
+I don't know why this still exists. Needs testing to see if
+removing it makes django misbehave.
+"""
 
 class Command(BaseCommand):
-    help = 'This is normal usage, clear database and reread everything'
+    help = 'Removed as redundant - use databaseReset.py'
 
     option_list = BaseCommand.option_list + (
-        make_option('--foo',
+        make_option('--reset',
             action='store_true',
-            dest='foo',
+            dest='reset',
             default=False,
-            help='test'),
+            help='Removed as redundant'),
     )
 
+    def add_arguments(self, parser):
+
+        parser.add_argument(
+            '--foo',
+            action='store_true',
+            dest='foo',
+            help='Help text',
+        )
+
-    def handle(self, *args, **options):
-        print(args)
-        print(options)
-        if "desc" in args:
-            self.resetdesc()
-        elif "scans" in args:
-            self.import_surveyscans()
-        elif "caves" in args:
-            self.reload_db()
-            self.make_dirs()
-            self.pageredirects()
-            self.import_caves()
-        elif "people" in args:
-            self.import_people()
-        elif "QMs" in args:
-            self.import_QMs()
-        elif "tunnel" in args:
-            self.import_tunnelfiles()
-        elif "reset" in args:
-            self.reset()
-        elif "survex" in args:
-            self.import_survex()
-        elif "survexpos" in args:
-            import parsers.survex
-            parsers.survex.LoadPos()
-        elif "logbooks" in args:
-            self.import_logbooks()
-        elif "autologbooks" in args:
-            self.import_auto_logbooks()
-        elif "dumplogbooks" in args:
-            self.dumplogbooks()
-        elif "writeCaves" in args:
-            self.writeCaves()
-        elif "foo" in args:
-            self.stdout.write('Tesing....')
-        else:
-            self.stdout.write("%s not recognised" % args)
-            self.usage(options)
-
-    def reload_db():
-        if settings.DATABASES['default']['ENGINE'] == 'django.db.backends.sqlite3':
-            try:
-                os.remove(databasename)
-            except OSError:
-                pass
-        else:
-            cursor = connection.cursor()
-            cursor.execute("DROP DATABASE %s" % databasename)
-            cursor.execute("CREATE DATABASE %s" % databasename)
-            cursor.execute("ALTER DATABASE %s CHARACTER SET=utf8" % databasename)
-            cursor.execute("USE %s" % databasename)
-        management.call_command('migrate', interactive=False)
-        # management.call_command('syncdb', interactive=False)
-        user = User.objects.create_user(expouser, expouseremail, expouserpass)
-        user.is_staff = True
-        user.is_superuser = True
-        user.save()
-
-    def make_dirs():
-        """Make directories that troggle requires"""
-        # should also deal with permissions here.
-        if not os.path.isdir(settings.PHOTOS_ROOT):
-            os.mkdir(settings.PHOTOS_ROOT)
-
-    def import_caves():
-        import parsers.caves
-        print("importing caves")
-        parsers.caves.readcaves()
-
-    def import_people():
-        import parsers.people
-        parsers.people.LoadPersonsExpos()
-
-    def import_logbooks():
-        # The below line was causing errors I didn't understand (it said LOGFILE was a string), and I couldn't be bothered to figure
-        # what was going on so I just catch the error with a try. - AC 21 May
-        try:
-            settings.LOGFILE.write('\nBegun importing logbooks at ' + time.asctime() + '\n' + '-' * 60)
-        except:
-            pass
-
-        import parsers.logbooks
-        parsers.logbooks.LoadLogbooks()
-
-    def import_survex():
-        import parsers.survex
-        parsers.survex.LoadAllSurvexBlocks()
-        parsers.survex.LoadPos()
-
-    def import_QMs():
-        import parsers.QMs
-
-    def import_surveys():
-        import parsers.surveys
-        parsers.surveys.parseSurveys(logfile=settings.LOGFILE)
-
-    def import_surveyscans():
-        import parsers.surveys
-        parsers.surveys.LoadListScans()
-
-    def import_tunnelfiles():
-        import parsers.surveys
-        parsers.surveys.LoadTunnelFiles()
-
-    def reset():
-        """ Wipe the troggle database and import everything from legacy data
-        """
-        reload_db()
-        make_dirs()
-        pageredirects()
-        import_caves()
-        import_people()
-        import_surveyscans()
-        import_survex()
-        import_logbooks()
-        import_QMs()
-        try:
-            import_tunnelfiles()
-        except:
-            print("Tunnel files parser broken.")
-
-        import_surveys()
-
-    def pageredirects():
-        for oldURL, newURL in [("indxal.htm", reverse("caveindex"))]:
-            f = troggle.flatpages.models.Redirect(originalURL=oldURL, newURL=newURL)
-            f.save()
-
-    def writeCaves():
-        for cave in Cave.objects.all():
-            cave.writeDataFile()
-        for entrance in Entrance.objects.all():
-            entrance.writeDataFile()
-
-    def usage(self, parser):
-        print("""Usage is 'manage.py reset_db <command>'
-              where command is:
-              reset - this is normal usage, clear database and reread everything
-              desc
-              caves - read in the caves
-              logbooks - read in the logbooks
-              autologbooks
-              dumplogbooks
-              people
-              QMs - read in the QM files
-              resetend
-              scans - read in the scanned surveynotes
-              survex - read in the survex files
-              survexpos
-              tunnel - read in the Tunnel files
-              writeCaves
-              """)
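Note: the hunk above keeps the old optparse-based option_list while adding an add_arguments() method alongside it; option_list/make_option is the pre-Django-1.8 API (removed in Django 1.10) and add_arguments(parser) is its argparse-based replacement. A minimal sketch of the newer style, with illustrative names rather than code from this repository:

from django.core.management.base import BaseCommand

class Command(BaseCommand):
    help = 'argparse-style options sketch (Django >= 1.8)'

    def add_arguments(self, parser):
        # same shape as the --foo/--reset options declared above
        parser.add_argument('--reset', action='store_true', dest='reset',
                            default=False, help='clear database and reread everything')

    def handle(self, *args, **options):
        if options['reset']:
            self.stdout.write('resetting...')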
core/models.py — 138 lines changed
@@ -10,7 +10,7 @@ from django.db.models import Min, Max
 from django.conf import settings
 from decimal import Decimal, getcontext
 from django.core.urlresolvers import reverse
-from imagekit.models import ProcessedImageField #ImageModel
+from imagekit.models import ImageModel
 from django.template import Context, loader
 import settings
 getcontext().prec=2 #use 2 significant figures for decimal calculations
@@ -39,10 +39,8 @@ try:
                     filename=settings.LOGFILE,
                     filemode='w')
 except:
-    subprocess.call(settings.FIX_PERMISSIONS)
-    logging.basicConfig(level=logging.DEBUG,
-                    filename=settings.LOGFILE,
-                    filemode='w')
+    # Opening of file for writing is going to fail currently, so decide it doesn't matter for now
+    pass
 
 #This class is for adding fields and methods which all of our models will have.
 class TroggleModel(models.Model):
@@ -104,37 +102,32 @@ class Expedition(TroggleModel):
     def day_max(self):
         res = self.expeditionday_set.all()
         return res and res[len(res) - 1] or None
 
 
 
 class ExpeditionDay(TroggleModel):
     expedition = models.ForeignKey("Expedition")
     date = models.DateField()
 
     class Meta:
-        ordering = ('date',)
+        ordering = ('date',)
 
     def GetPersonTrip(self, personexpedition):
         personexpeditions = self.persontrip_set.filter(expeditionday=self)
         return personexpeditions and personexpeditions[0] or None
 
 
 #
 # single Person, can go on many years
 #
 class Person(TroggleModel):
     first_name = models.CharField(max_length=100)
     last_name = models.CharField(max_length=100)
     fullname = models.CharField(max_length=200)
     is_vfho = models.BooleanField(help_text="VFHO is the Vereines für Höhlenkunde in Obersteier, a nearby Austrian caving club.", default=False)
     mug_shot = models.CharField(max_length=100, blank=True,null=True)
     blurb = models.TextField(blank=True,null=True)
 
     #href = models.CharField(max_length=200)
     orderref = models.CharField(max_length=200) # for alphabetic
 
     #the below have been removed and made methods. I'm not sure what the b in bisnotable stands for. - AC 16 Feb
     #notability = models.FloatField() # for listing the top 20 people
     #bisnotable = models.BooleanField(default=False)
     user = models.OneToOneField(User, null=True, blank=True)
     def get_absolute_url(self):
         return urlparse.urljoin(settings.URL_ROOT,reverse('person',kwargs={'first_name':self.first_name,'last_name':self.last_name}))
@@ -151,9 +144,15 @@ class Person(TroggleModel):
 
     def notability(self):
         notability = Decimal(0)
+        max_expo_val = 0
+
+        max_expo_year = Expedition.objects.all().aggregate(Max('year'))
+        max_expo_val = int(max_expo_year['year__max']) + 1
+
         for personexpedition in self.personexpedition_set.all():
             if not personexpedition.is_guest:
-                notability += Decimal(1) / (2012 - int(personexpedition.expedition.year))
+                print(personexpedition.expedition.year)
+                notability += Decimal(1) / (max_expo_val - int(personexpedition.expedition.year))
         return notability
 
     def bisnotable(self):
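Note: the notability() change above replaces the hard-coded 2012 with max_expo_val, one more than the most recent Expedition year, so each non-guest expedition contributes 1/(max_expo_val - year) and recent attendance counts for more. A worked example with assumed years:

from decimal import Decimal

max_expo_val = 2019 + 1              # assuming the newest expedition year is 2019
years_attended = [2018, 2015, 2010]  # hypothetical non-guest expeditions

notability = sum(Decimal(1) / (max_expo_val - y) for y in years_attended)
print(notability)                    # 1/2 + 1/5 + 1/10 = 0.8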
@@ -241,18 +240,22 @@ class PersonExpedition(TroggleModel):
 # Single parsed entry from Logbook
 #
 class LogbookEntry(TroggleModel):
-    date = models.DateField()#MJG wants to turn this into a datetime such that multiple Logbook entries on the same day can be ordered.
+
+    LOGBOOK_ENTRY_TYPES = (
+        ("wiki", "Wiki style logbook"),
+        ("html", "Html style logbook")
+    )
+
+    date = models.DateField()#MJG wants to turn this into a datetime such that multiple Logbook entries on the same day can be ordered.ld()
     expeditionday = models.ForeignKey("ExpeditionDay", null=True)#MJG wants to KILL THIS (redundant information)
     expedition = models.ForeignKey(Expedition,blank=True,null=True) # yes this is double-
     #author = models.ForeignKey(PersonExpedition,blank=True,null=True) # the person who writes it up doesn't have to have been on the trip.
     # Re: the above- so this field should be "typist" or something, not "author". - AC 15 jun 09
     #MJG wants to KILL THIS, as it is typically redundant with PersonTrip.is_logbook_entry_author, in the rare it was not redundanty and of actually interest it could be added to the text.
-    title = models.CharField(max_length=settings.MAX_LOGBOOK_ENTRY_TITLE_LENGTH)
-    cave_slug = models.SlugField(max_length=50)
-    place = models.CharField(max_length=100,blank=True,null=True,help_text="Only use this if you haven't chosen a cave")
-    text = models.TextField()
-    slug = models.SlugField(max_length=50)
-    filename = models.CharField(max_length=200,null=True)
+    title = models.CharField(max_length=settings.MAX_LOGBOOK_ENTRY_TITLE_LENGTH)
+    cave_slug = models.SlugField(max_length=50)
+    place = models.CharField(max_length=100,blank=True,null=True,help_text="Only use this if you haven't chosen a cave")
+    text = models.TextField()
+    slug = models.SlugField(max_length=50)
+    filename = models.CharField(max_length=200,null=True)
+    entry_type = models.CharField(default="wiki",null=True,choices=LOGBOOK_ENTRY_TYPES,max_length=50)
 
     class Meta:
         verbose_name_plural = "Logbook Entries"
@@ -291,7 +294,7 @@ class LogbookEntry(TroggleModel):
         if self.cave:
             nextQMnumber=self.cave.new_QM_number(self.date.year)
         else:
-            return none
+            return None
         return nextQMnumber
 
     def new_QM_found_link(self):
@@ -301,6 +304,7 @@ class LogbookEntry(TroggleModel):
     def DayIndex(self):
         return list(self.expeditionday.logbookentry_set.all()).index(self)
 
+
 #
 # Single Person going on a trip, which may or may not be written up (accounts for different T/U for people in same logbook entry)
 #
@@ -446,12 +450,12 @@ class Cave(TroggleModel):
         elif self.unofficial_number:
             href = self.unofficial_number
         else:
-            href = official_name.lower()
+            href = self.official_name.lower()
         #return settings.URL_ROOT + '/cave/' + href + '/'
         return urlparse.urljoin(settings.URL_ROOT, reverse('cave',kwargs={'cave_id':href,}))
 
     def __unicode__(self, sep = u": "):
-        return unicode(self.slug())
+        return unicode("slug:"+self.slug())
 
     def get_QMs(self):
         return QM.objects.filter(found_by__cave_slug=self.caveslug_set.all())
@@ -529,13 +533,15 @@ class Cave(TroggleModel):
 
 def getCaveByReference(reference):
     areaname, code = reference.split("-", 1)
-    print(areaname, code)
+    #print(areaname, code)
     area = Area.objects.get(short_name = areaname)
-    print(area)
-    foundCaves = list(Cave.objects.filter(area = area, kataster_number = code).all()) + list(Cave.objects.filter(area = area, unofficial_number = code).all())
+    #print(area)
+    foundCaves = list(Cave.objects.filter(area = area, kataster_number = code).all()) + list(Cave.objects.filter(area = area, unofficial_number = code).all())
     print(list(foundCaves))
-    assert len(foundCaves) == 1
-    return foundCaves[0]
+    if len(foundCaves) == 1:
+        return foundCaves[0]
+    else:
+        return False
 
 class OtherCaveName(TroggleModel):
     name = models.CharField(max_length=160)
@@ -738,17 +744,17 @@ class QM(TroggleModel):
 
     number = models.IntegerField(help_text="this is the sequential number in the year", )
     GRADE_CHOICES=(
-        ('A', 'A: Large obvious lead'),
-        ('B', 'B: Average lead'),
-        ('C', 'C: Tight unpromising lead'),
-        ('D', 'D: Dig'),
-        ('X', 'X: Unclimbable aven')
+        ('A', 'A: Large obvious lead'),
+        ('B', 'B: Average lead'),
+        ('C', 'C: Tight unpromising lead'),
+        ('D', 'D: Dig'),
+        ('X', 'X: Unclimbable aven')
     )
     grade = models.CharField(max_length=1, choices=GRADE_CHOICES)
     location_description = models.TextField(blank=True)
     #should be a foreignkey to surveystation
     nearest_station_description = models.CharField(max_length=400,null=True,blank=True)
-    nearest_station = models.CharField(max_length=200,blank=True,null=True)
+    nearest_station_name = models.CharField(max_length=200,blank=True,null=True)
+    nearest_station = models.ForeignKey(SurvexStation,null=True,blank=True)
     area = models.CharField(max_length=100,blank=True,null=True)
     completion_description = models.TextField(blank=True,null=True)
     comment=models.TextField(blank=True,null=True)
@@ -772,31 +778,32 @@ class QM(TroggleModel):
     def wiki_link(self):
         return u"%s%s%s" % ('[[QM:',self.code(),']]')
 
-photoFileStorage = FileSystemStorage(location=settings.PHOTOS_ROOT, base_url=settings.PHOTOS_URL)
-class DPhoto(TroggleImageModel):
-    caption = models.CharField(max_length=1000,blank=True,null=True)
-    contains_logbookentry = models.ForeignKey(LogbookEntry,blank=True,null=True)
-    contains_person = models.ManyToManyField(Person,blank=True,null=True)
-    file = models.ImageField(storage=photoFileStorage, upload_to='.',)
-    is_mugshot = models.BooleanField(default=False)
-    contains_cave = models.ForeignKey(Cave,blank=True,null=True)
-    contains_entrance = models.ForeignKey(Entrance, related_name="photo_file",blank=True,null=True)
+#photoFileStorage = FileSystemStorage(location=settings.PHOTOS_ROOT, base_url=settings.PHOTOS_URL)
+#class DPhoto(TroggleImageModel):
+    #caption = models.CharField(max_length=1000,blank=True,null=True)
+    #contains_logbookentry = models.ForeignKey(LogbookEntry,blank=True,null=True)
+    #contains_person = models.ManyToManyField(Person,blank=True,null=True)
+    # replace link to copied file with link to original file location
+    #file = models.ImageField(storage=photoFileStorage, upload_to='.',)
+    #is_mugshot = models.BooleanField(default=False)
+    #contains_cave = models.ForeignKey(Cave,blank=True,null=True)
+    #contains_entrance = models.ForeignKey(Entrance, related_name="photo_file",blank=True,null=True)
     #nearest_survey_point = models.ForeignKey(SurveyStation,blank=True,null=True)
-    nearest_QM = models.ForeignKey(QM,blank=True,null=True)
-    lon_utm = models.FloatField(blank=True,null=True)
-    lat_utm = models.FloatField(blank=True,null=True)
+    #nearest_QM = models.ForeignKey(QM,blank=True,null=True)
+    #lon_utm = models.FloatField(blank=True,null=True)
+    #lat_utm = models.FloatField(blank=True,null=True)
 
-    class IKOptions:
-        spec_module = 'core.imagekit_specs'
-        cache_dir = 'thumbs'
-        image_field = 'file'
+    # class IKOptions:
+    #    spec_module = 'core.imagekit_specs'
+    #    cache_dir = 'thumbs'
+    #    image_field = 'file'
 
     #content_type = models.ForeignKey(ContentType)
     #object_id = models.PositiveIntegerField()
     #location = generic.GenericForeignKey('content_type', 'object_id')
 
-    def __unicode__(self):
-        return self.caption
+    # def __unicode__(self):
+    #    return self.caption
 
 scansFileStorage = FileSystemStorage(location=settings.SURVEY_SCANS, base_url=settings.SURVEYS_URL)
 def get_scan_path(instance, filename):
@@ -807,7 +814,7 @@ def get_scan_path(instance, filename):
     number=str(instance.survey.wallet_letter) + number #two strings formatting because convention is 2009#01 or 2009#X01
     return os.path.join('./',year,year+r'#'+number,str(instance.contents)+str(instance.number_in_wallet)+r'.jpg')
 
-class ScannedImage(TroggleImageModel):
+class ScannedImage(TroggleImageModel):
     file = models.ImageField(storage=scansFileStorage, upload_to=get_scan_path)
     scanned_by = models.ForeignKey(Person,blank=True, null=True)
     scanned_on = models.DateField(null=True)
@@ -828,7 +835,7 @@ class ScannedImage(TroggleImageModel):
     #This is an ugly hack to deal with the #s in our survey scan paths. The correct thing is to write a custom file storage backend which calls urlencode on the name for making file.url but not file.path.
     def correctURL(self):
         return string.replace(self.file.url,r'#',r'%23')
 
 
     def __unicode__(self):
         return get_scan_path(self,'')
 
@@ -861,3 +868,14 @@ class Survey(TroggleModel):
 
     def elevations(self):
         return self.scannedimage_set.filter(contents='elevation')
+
+class DataIssue(TroggleModel):
+    date = models.DateTimeField(auto_now_add=True, blank=True)
+    parser = models.CharField(max_length=50, blank=True, null=True)
+    message = models.CharField(max_length=400, blank=True, null=True)
+
+    class Meta:
+        ordering = ['date']
+
+    def __unicode__(self):
+        return u"%s - %s" % (self.parser, self.message)

core/models_survex.py
@@ -97,7 +97,7 @@ class SurvexBlockLookUpManager(models.Manager):
             blocknames = []
         else:
             blocknames = name.split(".")
-        block = SurvexBlock.objects.get(parent=None, survexfile__path="all")
+        block = SurvexBlock.objects.get(parent=None, survexfile__path=settings.SURVEX_TOPNAME)
         for blockname in blocknames:
             block = SurvexBlock.objects.get(parent=block, name__iexact=blockname)
         return block
@@ -147,7 +147,7 @@ class SurvexBlock(models.Model):
             return ssl[0]
         #print name
         ss = SurvexStation(name=name, block=self)
-        ss.save()
+        #ss.save()
         return ss
 
     def DayIndex(self):
@@ -197,6 +197,9 @@ class SurvexScansFolder(models.Model):
 
     def get_absolute_url(self):
         return urlparse.urljoin(settings.URL_ROOT, reverse('surveyscansfolder', kwargs={"path":re.sub("#", "%23", self.walletname)}))
+
+    def __unicode__(self):
+        return unicode(self.walletname) + " (Survey Scans Folder)"
 
 class SurvexScanSingle(models.Model):
     ffile = models.CharField(max_length=200)
@@ -208,6 +211,9 @@ class SurvexScanSingle(models.Model):
 
     def get_absolute_url(self):
         return urlparse.urljoin(settings.URL_ROOT, reverse('surveyscansingle', kwargs={"path":re.sub("#", "%23", self.survexscansfolder.walletname), "file":self.name}))
+
+    def __unicode__(self):
+        return "Survey Scan Image: " + unicode(self.name) + " in " + unicode(self.survexscansfolder)
 
 
 class TunnelFile(models.Model):
@@ -225,4 +231,4 @@ class TunnelFile(models.Model):
 
     class Meta:
         ordering = ('tunnelpath',)
core/templatetags/survex_markup.py
@@ -47,6 +47,6 @@ def survex_to_html(value, autoescape=None):
     if autoescape:
         value = conditional_escape(value)
     for regex, sub in regexes:
-        print sub
+        print(sub)
         value = regex.sub(sub, value)
     return mark_safe(value)

core/templatetags/wiki_markup.py
@@ -3,11 +3,10 @@ from django.utils.html import conditional_escape
 from django.template.defaultfilters import stringfilter
 from django.utils.safestring import mark_safe
 from django.conf import settings
-from troggle.core.models import QM, DPhoto, LogbookEntry, Cave
+from troggle.core.models import QM, LogbookEntry, Cave
 import re, urlparse
 
 register = template.Library()
 
-
 @register.filter()
 def plusone(n):
@@ -77,7 +76,7 @@ def wiki_to_html_short(value, autoescape=None):
         if number>1:
             return '<h'+num+'>'+matchobj.groups()[1]+'</h'+num+'>'
         else:
-            print 'morethanone'
+            print('morethanone')
             return matchobj.group()
     value = re.sub(r"(?m)^(=+)([^=]+)(=+)$",headerrepl,value)
 
@@ -121,13 +120,13 @@ def wiki_to_html_short(value, autoescape=None):
         except KeyError:
             linkText=None
 
-        try:
-            photo=DPhoto.objects.get(file=matchdict['photoName'])
-            if not linkText:
-                linkText=str(photo)
-            res=r'<a href=' + photo.get_admin_url() +'>' + linkText + '</a>'
-        except Photo.DoesNotExist:
-            res = r'<a class="redtext" href="">make new photo</a>'
+        # try:
+        #     photo=DPhoto.objects.get(file=matchdict['photoName'])
+        #     if not linkText:
+        #         linkText=str(photo)
+        #     res=r'<a href=' + photo.get_admin_url() +'>' + linkText + '</a>'
+        # except Photo.DoesNotExist:
+        #     res = r'<a class="redtext" href="">make new photo</a>'
         return res
 
     def photoSrcRepl(matchobj):
@@ -143,13 +142,13 @@ def wiki_to_html_short(value, autoescape=None):
     value = re.sub(photoSrcPattern,photoSrcRepl, value, re.DOTALL)
 
     #make cave links
-    value = re.sub("\[\[\s*cave:([^\s]+)\s*\s*\]\]", r'<a href="%scave/\1/">\1</a>' % settings.URL_ROOT, value, re.DOTALL)
+    value = re.sub(r"\[\[\s*cave:([^\s]+)\s*\s*\]\]", r'<a href="%scave/\1/">\1</a>' % settings.URL_ROOT, value, re.DOTALL)
     #make people links
-    value = re.sub("\[\[\s*person:(.+)\|(.+)\]\]",r'<a href="%sperson/\1/">\2</a>' % settings.URL_ROOT, value, re.DOTALL)
+    value = re.sub(r"\[\[\s*person:(.+)\|(.+)\]\]",r'<a href="%sperson/\1/">\2</a>' % settings.URL_ROOT, value, re.DOTALL)
     #make subcave links
-    value = re.sub("\[\[\s*subcave:(.+)\|(.+)\]\]",r'<a href="%ssubcave/\1/">\2</a>' % settings.URL_ROOT, value, re.DOTALL)
+    value = re.sub(r"\[\[\s*subcave:(.+)\|(.+)\]\]",r'<a href="%ssubcave/\1/">\2</a>' % settings.URL_ROOT, value, re.DOTALL)
     #make cavedescription links
-    value = re.sub("\[\[\s*cavedescription:(.+)\|(.+)\]\]",r'<a href="%scavedescription/\1/">\2</a>' % settings.URL_ROOT, value, re.DOTALL)
+    value = re.sub(r"\[\[\s*cavedescription:(.+)\|(.+)\]\]",r'<a href="%scavedescription/\1/">\2</a>' % settings.URL_ROOT, value, re.DOTALL)
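Note: the r prefixes added to the re.sub() patterns above matter because sequences such as \[ and \s are not valid string escapes: Python 2 happens to pass them through unchanged, but Python 3.6+ emits a DeprecationWarning for invalid escapes in ordinary string literals, so regex patterns should be spelled as raw strings. For example:

import re

pattern = r"\[\[\s*cave:([^\s]+)\s*\s*\]\]"   # raw string: \[ reaches re verbatim
print(re.sub(pattern, r'<a href="/cave/\1/">\1</a>', "[[cave:204]]"))
# -> <a href="/cave/204/">204</a>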
core/views_caves.py — 194 lines changed (mode: Normal file → Executable file)
@@ -1,5 +1,7 @@
 #!/usr/bin/python
 # -*- coding: utf-8 -*-
+from __future__ import (absolute_import, division,
+                        print_function, unicode_literals)
 
 from troggle.core.models import CaveSlug, Cave, CaveAndEntrance, Survey, Expedition, QM, CaveDescription, EntranceSlug, Entrance, Area, SurvexStation
 from troggle.core.forms import CaveForm, CaveAndEntranceFormSet, VersionControlCommentForm, EntranceForm, EntranceLetterForm
@@ -7,19 +9,44 @@ import troggle.core.models as models
 import troggle.settings as settings
 from troggle.helper import login_required_if_public
 
-from PIL import Image, ImageDraw, ImageFont
 from django.forms.models import modelformset_factory
 from django import forms
 from django.core.urlresolvers import reverse
-from utils import render_with_context # see views_logbooks for explanation on this.
 from django.http import HttpResponse, HttpResponseRedirect
 from django.conf import settings
-import re, urlparse
-from django.shortcuts import get_object_or_404
+import re
+import os
+import urlparse
+#import urllib.parse
+from django.shortcuts import get_object_or_404, render
+import settings
+
+
+from PIL import Image, ImageDraw, ImageFont
+import string, os, sys, subprocess
+
+class MapLocations(object):
+    p = [
+        ("laser.0_7", "BNase", "Reference", "Bräuning Nase laser point"),
+        ("226-96", "BZkn", "Reference", "Bräuning Zinken trig point"),
+        ("vd1","VD1","Reference", "VD1 survey point"),
+        ("laser.kt114_96","HSK","Reference", "Hinterer Schwarzmooskogel trig point"),
+        ("2000","Nipple","Reference", "Nipple (Weiße Warze)"),
+        ("3000","VSK","Reference", "Vorderer Schwarzmooskogel summit"),
+        ("topcamp", "OTC", "Reference", "Old Top Camp"),
+        ("laser.0", "LSR0", "Reference", "Laser Point 0"),
+        ("laser.0_1", "LSR1", "Reference", "Laser Point 0/1"),
+        ("laser.0_3", "LSR3", "Reference", "Laser Point 0/3"),
+        ("laser.0_5", "LSR5", "Reference", "Laser Point 0/5"),
+        ("225-96", "BAlm", "Reference", "Bräuning Alm trig point")
+    ]
+    def points(self):
+        for ent in Entrance.objects.all():
+            if ent.best_station():
+                areaName = ent.caveandentrance_set.all()[0].cave.getArea().short_name
+                self.p.append((ent.best_station(), "%s-%s" % (areaName, str(ent)[5:]), ent.needs_surface_work(), str(ent)))
+        return self.p
+
+    def __str__(self):
+        return "{} map locations".format(len(self.p))
 
 def getCave(cave_id):
     """Returns a cave object when given a cave name or number. It is used by views including cavehref, ent, and qm."""
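Note: the new MapLocations class above merges a fixed list of surface reference points with one entry per cave entrance that has a best_station(). A hypothetical caller would iterate points() to place map markers:

locations = MapLocations()
for station, short_name, status, description in locations.points():
    # e.g. ("laser.0_7", "BNase", "Reference", "Bräuning Nase laser point")
    print(station, short_name, status, description)
print(locations)  # __str__ gives "N map locations"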
@@ -58,7 +85,7 @@ def caveindex(request):
|
||||
caves1626 = list(Cave.objects.filter(area__short_name = "1626"))
|
||||
caves1623.sort(caveCmp)
|
||||
caves1626.sort(caveCmp)
|
||||
return render_with_context(request,'caveindex.html', {'caves1623': caves1623, 'caves1626': caves1626, 'notablecaves':notablecaves, 'cavepage': True})
|
||||
return render(request,'caveindex.html', {'caves1623': caves1623, 'caves1626': caves1626, 'notablecaves':notablecaves, 'cavepage': True})
|
||||
|
||||
def millenialcaves(request):
|
||||
#RW messing around area
|
||||
@@ -83,43 +110,43 @@ def cave3d(request, cave_id=''):
def cave(request, cave_id='', offical_name=''):
    cave=getCave(cave_id)
    if cave.non_public and settings.PUBLIC_SITE and not request.user.is_authenticated():
        return render_with_context(request,'nonpublic.html', {'instance': cave, 'cavepage': True, 'cave_id': cave_id})
        return render(request,'nonpublic.html', {'instance': cave, 'cavepage': True, 'cave_id': cave_id})
    else:
        return render_with_context(request,'cave.html', {'settings': settings, 'cave': cave, 'cavepage': True, 'cave_id': cave_id})
        return render(request,'cave.html', {'settings': settings, 'cave': cave, 'cavepage': True, 'cave_id': cave_id})

def caveEntrance(request, slug):
    cave = Cave.objects.get(caveslug__slug = slug)
    if cave.non_public and settings.PUBLIC_SITE and not request.user.is_authenticated():
        return render_with_context(request,'nonpublic.html', {'instance': cave})
        return render(request,'nonpublic.html', {'instance': cave})
    else:
        return render_with_context(request,'cave_entrances.html', {'cave': cave})
        return render(request,'cave_entrances.html', {'cave': cave})

def caveDescription(request, slug):
    cave = Cave.objects.get(caveslug__slug = slug)
    if cave.non_public and settings.PUBLIC_SITE and not request.user.is_authenticated():
        return render_with_context(request,'nonpublic.html', {'instance': cave})
        return render(request,'nonpublic.html', {'instance': cave})
    else:
        return render_with_context(request,'cave_uground_description.html', {'cave': cave})
        return render(request,'cave_uground_description.html', {'cave': cave})

def caveQMs(request, slug):
    cave = Cave.objects.get(caveslug__slug = slug)
    if cave.non_public and settings.PUBLIC_SITE and not request.user.is_authenticated():
        return render_with_context(request,'nonpublic.html', {'instance': cave})
        return render(request,'nonpublic.html', {'instance': cave})
    else:
        return render_with_context(request,'cave_qms.html', {'cave': cave})
        return render(request,'cave_qms.html', {'cave': cave})

def caveLogbook(request, slug):
    cave = Cave.objects.get(caveslug__slug = slug)
    if cave.non_public and settings.PUBLIC_SITE and not request.user.is_authenticated():
        return render_with_context(request,'nonpublic.html', {'instance': cave})
        return render(request,'nonpublic.html', {'instance': cave})
    else:
        return render_with_context(request,'cave_logbook.html', {'cave': cave})
        return render(request,'cave_logbook.html', {'cave': cave})

def caveSlug(request, slug):
    cave = Cave.objects.get(caveslug__slug = slug)
    if cave.non_public and settings.PUBLIC_SITE and not request.user.is_authenticated():
        return render_with_context(request,'nonpublic.html', {'instance': cave, 'cave_editable': slug})
        return render(request,'nonpublic.html', {'instance': cave, 'cave_editable': slug})
    else:
        return render_with_context(request,'cave.html', {'cave': cave, 'cave_editable': slug})
        return render(request,'cave.html', {'cave': cave, 'cave_editable': slug})

@login_required_if_public
def edit_cave(request, slug=None):
@@ -160,7 +187,7 @@ def edit_cave(request, slug=None):
    ceFormSet = CaveAndEntranceFormSet(queryset=cave.caveandentrance_set.all())
    versionControlForm = VersionControlCommentForm()

    return render_with_context(request,
    return render(request,
        'editcave2.html',
        {'form': form,
         'caveAndEntranceFormSet': ceFormSet,
@@ -204,7 +231,7 @@ def editEntrance(request, caveslug, slug=None):
        entletter = EntranceLetterForm(request.POST)
    else:
        entletter = None
    return render_with_context(request,
    return render(request,
        'editentrance.html',
        {'form': form,
         'versionControlForm': versionControlForm,
@@ -215,10 +242,10 @@ def qm(request,cave_id,qm_id,year,grade=None):
    year=int(year)
    try:
        qm=getCave(cave_id).get_QMs().get(number=qm_id,found_by__date__year=year)
        return render_with_context(request,'qm.html',locals())
        return render(request,'qm.html',locals())

    except QM.DoesNotExist:
        url=urlparse.urljoin(settings.URL_ROOT, r'/admin/core/qm/add/'+'?'+ r'number=' + qm_id)
        url=urllib.parse.urljoin(settings.URL_ROOT, r'/admin/core/qm/add/'+'?'+ r'number=' + qm_id)
        if grade:
            url += r'&grade=' + grade
        return HttpResponseRedirect(url)
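The only change in this hunk is the module path: Python 2's `urlparse.urljoin` and Python 3's `urllib.parse.urljoin` behave identically. A sketch with made-up values showing the redirect URL being built:

# Sketch only: hypothetical URL_ROOT and qm_id, not real data.
from urllib.parse import urljoin   # "from urlparse import urljoin" on Python 2

url = urljoin("http://expo.example/", r'/admin/core/qm/add/' + '?' + r'number=' + "7")
# -> "http://expo.example/admin/core/qm/add/?number=7"; "&grade=B" is appended when grade is set.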
@@ -228,29 +255,29 @@ def qm(request,cave_id,qm_id,year,grade=None):
def ent(request, cave_id, ent_letter):
    cave = Cave.objects.filter(kataster_number = cave_id)[0]
    cave_and_ent = CaveAndEntrance.objects.filter(cave = cave).filter(entrance_letter = ent_letter)[0]
    return render_with_context(request,'entrance.html', {'cave': cave,
    return render(request,'entrance.html', {'cave': cave,
        'entrance': cave_and_ent.entrance,
        'letter': cave_and_ent.entrance_letter,})

def entranceSlug(request, slug):
    entrance = Entrance.objects.get(entranceslug__slug = slug)
    if entrance.non_public and not request.user.is_authenticated():
        return render_with_context(request,'nonpublic.html', {'instance': entrance})
        return render(request,'nonpublic.html', {'instance': entrance})
    else:
        return render_with_context(request,'entranceslug.html', {'entrance': entrance})
        return render(request,'entranceslug.html', {'entrance': entrance})

def survexblock(request, survexpath):
    survexpath = re.sub("/", ".", survexpath)
    print "jjjjjj", survexpath
    print("jjjjjj", survexpath)
    survexblock = models.SurvexBlock.objects.get(survexpath=survexpath)
    #ftext = survexblock.filecontents()
    ftext = survexblock.text
    return render_with_context(request,'survexblock.html', {'survexblock':survexblock, 'ftext':ftext, })
    return render(request,'survexblock.html', {'survexblock':survexblock, 'ftext':ftext, })

def surveyindex(request):
    surveys=Survey.objects.all()
    expeditions=Expedition.objects.order_by("-year")
    return render_with_context(request,'survey.html',locals())
    return render(request,'survey.html',locals())

def survey(request,year,wallet_number):
    surveys=Survey.objects.all()
@@ -263,19 +290,19 @@ def survey(request,year,wallet_number):
    planSketches=current_survey.scannedimage_set.filter(contents='plan')
    elevationSketches=current_survey.scannedimage_set.filter(contents='elevation')

    return render_with_context(request,'survey.html', locals())
    return render(request,'survey.html', locals())

def cave_description(request, cavedescription_name):
    cave_description = get_object_or_404(CaveDescription, short_name = cavedescription_name)
    return render_with_context(request,'cave_description.html', locals())
    return render(request,'cave_description.html', locals())

def get_entrances(request, caveslug):
    cave = Cave.objects.get(caveslug__slug = caveslug)
    return render_with_context(request,'options.html', {"items": [(e.entrance.slug(), e.entrance.slug()) for e in cave.entrances()]})
    return render(request,'options.html', {"items": [(e.entrance.slug(), e.entrance.slug()) for e in cave.entrances()]})

def get_qms(request, caveslug):
    cave = Cave.objects.get(caveslug__slug = caveslug)
    return render_with_context(request,'options.html', {"items": [(e.entrance.slug(), e.entrance.slug()) for e in cave.entrances()]})
    return render(request,'options.html', {"items": [(e.entrance.slug(), e.entrance.slug()) for e in cave.entrances()]})

areanames = [
    #('', 'Location unclear'),
@@ -313,7 +340,7 @@ def prospecting(request):
    caves = list(a.cave_set.all())
    caves.sort(caveCmp)
    areas.append((name, a, caves))
    return render_with_context(request, 'prospecting.html', {"areas": areas})
    return render(request, 'prospecting.html', {"areas": areas})

# Parameters for big map and zoomed subarea maps:
# big map first (zoom factor ignored)
@@ -336,7 +363,7 @@ maps = {
        "Grießkogel Area"],
}

for n in maps.keys():
for n in list(maps.keys()):
    L, T, R, B, S, name = maps[n]
    W = (R-L)/2
    H = (T-B)/2
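In Python 3 `dict.keys()` returns a live view, and adding or removing keys while iterating it raises a RuntimeError; `list(maps.keys())` takes a snapshot first. It is harmless when nothing is mutated, which is why the wrapper appears throughout this file. A tiny illustration:

# Sketch only: made-up dict; the list() snapshot makes mutation during the loop safe.
maps_demo = {"all": 1, "ul": 2}
for n in list(maps_demo.keys()):
    maps_demo[n + "-scaled"] = maps_demo[n] * 2   # would raise without the snapshot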
@@ -372,6 +399,7 @@ areacolours = {
for FONT in [
    "/usr/share/fonts/truetype/freefont/FreeSans.ttf",
    "/usr/X11R6/lib/X11/fonts/truetype/arial.ttf",
    "/mnt/c/windows/fonts/arial.ttf",
    "C:\WINNT\Fonts\ARIAL.TTF"
]:
    if os.path.isfile(FONT): break
@@ -407,7 +435,7 @@ def plot(surveypoint, number, point_type, label, mapcode, draw, img):
    ss = SurvexStation.objects.lookup(surveypoint)
    E, N = ss.x, ss.y
    shortnumber = number.replace("—","")
    (x,y) = map(int, mungecoord(E, N, mapcode, img))
    (x,y) = list(map(int, mungecoord(E, N, mapcode, img)))
    #imgmaps[maparea].append( [x-4, y-SIZE/2, x+4+draw.textsize(shortnumber)[0], y+SIZE/2, shortnumber, label] )
    draw.rectangle([(x+CIRCLESIZE, y-TEXTSIZE/2), (x+CIRCLESIZE*2+draw.textsize(shortnumber)[0], y+TEXTSIZE/2)], fill="#ffffff")
    draw.text((x+CIRCLESIZE * 1.5,y-TEXTSIZE/2), shortnumber, fill="#000000")
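The `list(map(...))` wrapper is the same Python 3 idiom: `map()` now returns a lazy one-shot iterator, so tuple unpacking still works but the result cannot be indexed or consumed twice without materialising it. In miniature:

# Sketch only: map() is lazy in Python 3.
m = map(int, ("123", "456"))
x, y = m                              # unpacking consumes the iterator once
xy = list(map(int, ("123", "456")))  # list() gives an indexable, reusable result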
@@ -419,44 +447,44 @@ def prospecting_image(request, name):

    mainImage = Image.open(os.path.join(settings.SURVEY_SCANS, "location_maps", "pguidemap.jpg"))
    if settings.PUBLIC_SITE and not request.user.is_authenticated():
        mainImage = Image.new("RGB", mainImage.size, '#ffffff')
    m = maps[name]
    #imgmaps = []
    if name == "all":
        img = mainImage
    else:
        M = maps['all']
        W, H = mainImage.size
        l = int((m[L] - M[L]) / (M[R] - M[L]) * W)
        t = int((m[T] - M[T]) / (M[B] - M[T]) * H)
        r = int((m[R] - M[L]) / (M[R] - M[L]) * W)
        b = int((m[B] - M[T]) / (M[B] - M[T]) * H)
        img = mainImage.crop((l, t, r, b))
        w = int(round(m[ZOOM] * (m[R] - m[L]) / (M[R] - M[L]) * W))
        h = int(round(m[ZOOM] * (m[B] - m[T]) / (M[B] - M[T]) * H))
        img = img.resize((w, h), Image.BICUBIC)
    draw = ImageDraw.Draw(img)
    draw.setfont(myFont)
    if name == "all":
        for maparea in maps.keys():
        for maparea in list(maps.keys()):
            if maparea == "all":
                continue
            localm = maps[maparea]
            l,t = mungecoord(localm[L], localm[T], "all", img)
            r,b = mungecoord(localm[R], localm[B], "all", img)
            text = maparea + " map"
            textlen = draw.textsize(text)[0] + 3
            draw.rectangle([l, t, l+textlen, t+TEXTSIZE+2], fill='#ffffff')
            draw.text((l+2, t+1), text, fill="#000000")
            #imgmaps.append( [l, t, l+textlen, t+SIZE+2, "submap" + maparea, maparea + " subarea map"] )
            draw.line([l, t, r, t], fill='#777777', width=LINEWIDTH)
            draw.line([l, b, r, b], fill='#777777', width=LINEWIDTH)
            draw.line([l, t, l, b], fill='#777777', width=LINEWIDTH)
            draw.line([r, t, r, b], fill='#777777', width=LINEWIDTH)
            draw.line([l, t, l+textlen, t], fill='#777777', width=LINEWIDTH)
            draw.line([l, t+TEXTSIZE+2, l+textlen, t+TEXTSIZE+2], fill='#777777', width=LINEWIDTH)
            draw.line([l, t, l, t+TEXTSIZE+2], fill='#777777', width=LINEWIDTH)
            draw.line([l+textlen, t, l+textlen, t+TEXTSIZE+2], fill='#777777', width=LINEWIDTH)
    #imgmaps[maparea] = []
    # Draw scale bar
    m100 = int(100 / (m[R] - m[L]) * img.size[0])
@@ -478,24 +506,24 @@ def prospecting_image(request, name):
    plot("laser.0_5", "LSR5", "Reference", "Laser Point 0/5", name, draw, img)
    plot("225-96", "BAlm", "Reference", "Bräuning Alm trig point", name, draw, img)
    for entrance in Entrance.objects.all():
        station = entrance.best_station()
        if station:
            #try:
            areaName = entrance.caveandentrance_set.all()[0].cave.getArea().short_name
            plot(station, "%s-%s" % (areaName, str(entrance)[5:]), entrance.needs_surface_work(), str(entrance), name, draw, img)
            #except:
            #    pass

    for (N, E, D, num) in [(35975.37, 83018.21, 100,"177"), # Calculated from bearings
                           (35350.00, 81630.00, 50, "71"), # From Auer map
                           (36025.00, 82475.00, 50, "146"), # From mystery map
                           (35600.00, 82050.00, 50, "35"), # From Auer map
                           (35650.00, 82025.00, 50, "44"), # From Auer map
                           (36200.00, 82925.00, 50, "178"), # Calculated from bearings
                           (35232.64, 82910.37, 25, "181"), # Calculated from bearings
                           (35323.60, 81357.83, 50, "74") # From Auer map
                          ]:
        (N,E,D) = map(float, (N, E, D))
        (N,E,D) = list(map(float, (N, E, D)))
        maparea = Cave.objects.get(kataster_number = num).getArea().short_name
        lo = mungecoord(N-D, E+D, name, img)
        hi = mungecoord(N+D, E-D, name, img)
120
core/views_logbooks.py
Normal file → Executable file
@@ -1,4 +1,4 @@
from django.shortcuts import render_to_response
from django.shortcuts import render_to_response, render
from troggle.core.models import Expedition, Person, PersonExpedition, PersonTrip, LogbookEntry, SurvexBlock
import troggle.core.models as models
import troggle.settings as settings
@@ -9,7 +9,6 @@ from troggle.core.forms import getTripForm#, get_name, PersonForm
from django.core.urlresolvers import reverse
from django.http import HttpResponseRedirect, HttpResponse
from django.template import Context, loader
from utils import render_with_context
import os.path
import troggle.parsers.logbooks as logbookparsers
from django.template.defaultfilters import slugify
@@ -55,7 +54,7 @@ def personindex(request):
    if person.bisnotable():
        notablepersons.append(person)

    return render_with_context(request,'personindex.html', {'persons': persons, 'personss':personss, 'notablepersons':notablepersons, })
    return render(request,'personindex.html', {'persons': persons, 'personss':personss, 'notablepersons':notablepersons})

def expedition(request, expeditionname):
@@ -75,10 +74,9 @@ def expedition(request, expeditionname):
    prow.append(pcell)
    personexpeditiondays.append({"personexpedition":personexpedition, "personrow":prow})

    message = ""
    if "reload" in request.GET:
        message = LoadLogbookForExpedition(this_expedition)
    return render_with_context(request,'expedition.html', {'expedition': this_expedition, 'expeditions':expeditions, 'personexpeditiondays':personexpeditiondays, 'message':message, 'settings':settings, 'dateditems': dateditems })
        LoadLogbookForExpedition(this_expedition)
    return render(request,'expedition.html', {'expedition': this_expedition, 'expeditions':expeditions, 'personexpeditiondays':personexpeditiondays, 'settings':settings, 'dateditems': dateditems })

def get_absolute_url(self):
    return ('expedition', (expedition.year))
@@ -103,13 +101,13 @@ def person(request, first_name='', last_name='', ):
    this_person.save()
    return HttpResponseRedirect(reverse('profiles_select_profile'))

    return render_with_context(request,'person.html', {'person': this_person, })
    return render(request,'person.html', {'person': this_person, })

def GetPersonChronology(personexpedition):
    res = { }
    for persontrip in personexpedition.persontrip_set.all():
        a = res.setdefault(persontrip.date, { })
        a = res.setdefault(persontrip.logbook_entry.date, { })
        a.setdefault("persontrips", [ ]).append(persontrip)

    for personrole in personexpedition.survexpersonrole_set.all():
@@ -136,17 +134,17 @@ def personexpedition(request, first_name='', last_name='', year=''):
    this_expedition = Expedition.objects.get(year=year)
    personexpedition = person.personexpedition_set.get(expedition=this_expedition)
    personchronology = GetPersonChronology(personexpedition)
    return render_with_context(request,'personexpedition.html', {'personexpedition': personexpedition, 'personchronology':personchronology})
    return render(request,'personexpedition.html', {'personexpedition': personexpedition, 'personchronology':personchronology})

def logbookentry(request, date, slug):
    this_logbookentry = LogbookEntry.objects.filter(date=date, slug=slug)

    if len(this_logbookentry)>1:
        return render_with_context(request, 'object_list.html',{'object_list':this_logbookentry})
        return render(request, 'object_list.html',{'object_list':this_logbookentry})
    else:
        this_logbookentry=this_logbookentry[0]
        return render_with_context(request, 'logbookentry.html', {'logbookentry': this_logbookentry})
        return render(request, 'logbookentry.html', {'logbookentry': this_logbookentry})

def logbookSearch(request, extra):
@@ -157,31 +155,104 @@ def logbookSearch(request, extra):
    entry_query = search.get_query(query_string, ['text','title',])
    found_entries = LogbookEntry.objects.filter(entry_query)

    return render_with_context(request,'logbooksearch.html',
    return render(request,'logbooksearch.html',
        { 'query_string': query_string, 'found_entries': found_entries, })
        #context_instance=RequestContext(request))

def personForm(request,pk):
    person=Person.objects.get(pk=pk)
    form=PersonForm(instance=person)
    return render_with_context(request,'personform.html', {'form':form,})
    return render(request,'personform.html', {'form':form,})

from settings import *
def pathsreport(request):
    pathsdict={
        "ADMIN_MEDIA_PREFIX" : ADMIN_MEDIA_PREFIX,
        "CAVEDESCRIPTIONSX" : CAVEDESCRIPTIONS,
        "DIR_ROOT" : DIR_ROOT,
        "ENTRANCEDESCRIPTIONS" : ENTRANCEDESCRIPTIONS,
        "EXPOUSER_EMAIL" : EXPOUSER_EMAIL,
        "EXPOUSERPASS" :"<redacted>",
        "EXPOUSER" : EXPOUSER,
        "EXPOWEB" : EXPOWEB,
        "EXPOWEB_URL" : EXPOWEB_URL,
        "FILES" : FILES,
        "JSLIB_URL" : JSLIB_URL,
        "LOGFILE" : LOGFILE,
        "LOGIN_REDIRECT_URL" : LOGIN_REDIRECT_URL,
        "MEDIA_ADMIN_DIR" : MEDIA_ADMIN_DIR,
        "MEDIA_ROOT" : MEDIA_ROOT,
        "MEDIA_URL" : MEDIA_URL,
        #"PHOTOS_ROOT" : PHOTOS_ROOT,
        "PHOTOS_URL" : PHOTOS_URL,
        "PYTHON_PATH" : PYTHON_PATH,
        "REPOS_ROOT_PATH" : REPOS_ROOT_PATH,
        "ROOT_URLCONF" : ROOT_URLCONF,
        "STATIC_ROOT" : STATIC_ROOT,
        "STATIC_URL" : STATIC_URL,
        "SURVEX_DATA" : SURVEX_DATA,
        "SURVEY_SCANS" : SURVEY_SCANS,
        "SURVEYS" : SURVEYS,
        "SURVEYS_URL" : SURVEYS_URL,
        "SVX_URL" : SVX_URL,
        "TEMPLATE_DIRS" : TEMPLATE_DIRS,
        "THREEDCACHEDIR" : THREEDCACHEDIR,
        "TINY_MCE_MEDIA_ROOT" : TINY_MCE_MEDIA_ROOT,
        "TINY_MCE_MEDIA_URL" : TINY_MCE_MEDIA_URL,
        "TUNNEL_DATA" : TUNNEL_DATA,
        "URL_ROOT" : URL_ROOT
    }

    ncodes = len(pathsdict)

    bycodeslist = sorted(pathsdict.iteritems())
    bypathslist = sorted(pathsdict.iteritems(), key=lambda x: x[1])

    return render(request, 'pathsreport.html', {
        "pathsdict":pathsdict,
        "bycodeslist":bycodeslist,
        "bypathslist":bypathslist,
        "ncodes":ncodes})

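Note that the new `pathsreport` still calls `dict.iteritems()`, which was removed in Python 3; the two sorts would need plain `.items()`, which behaves the same here. The Python 3 spelling, as a sketch with made-up settings:

# Sketch only: hypothetical settings values.
pathsdict_demo = {"MEDIA_URL": "/media/", "URL_ROOT": "/"}
bycodeslist = sorted(pathsdict_demo.items())                      # sort by setting name
bypathslist = sorted(pathsdict_demo.items(), key=lambda x: x[1])  # sort by path value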
def experimental(request):
    blockroots = models.SurvexBlock.objects.filter(name="root")
    if len(blockroots)>1:
        print(" ! more than one root survexblock {}".format(len(blockroots)))
        for sbr in blockroots:
            print("{} {} {} {}".format(sbr.id, sbr.name, sbr.text, sbr.date))
    sbr = blockroots[0]
    totalsurvexlength = sbr.totalleglength
    try:
        nimportlegs = int(sbr.text)
    except:
        print("{} {} {} {}".format(sbr.id, sbr.name, sbr.text, sbr.date))
        nimportlegs = -1

    legsbyexpo = [ ]
    addupsurvexlength = 0
    for expedition in Expedition.objects.all():
        survexblocks = expedition.survexblock_set.all()
        survexlegs = [ ]
        #survexlegs = [ ]
        legsyear=0
        survexleglength = 0.0
        for survexblock in survexblocks:
            survexlegs.extend(survexblock.survexleg_set.all())
            #survexlegs.extend(survexblock.survexleg_set.all())
            survexleglength += survexblock.totalleglength
        legsbyexpo.append((expedition, {"nsurvexlegs":len(survexlegs), "survexleglength":survexleglength}))
    legsbyexpo.reverse()

    survexlegs = models.SurvexLeg.objects.all()
    totalsurvexlength = sum([survexleg.tape for survexleg in survexlegs])
    return render_with_context(request, 'experimental.html', { "nsurvexlegs":len(survexlegs), "totalsurvexlength":totalsurvexlength, "legsbyexpo":legsbyexpo })
            try:
                legsyear += int(survexblock.text)
            except:
                pass
        addupsurvexlength += survexleglength
        legsbyexpo.append((expedition, {"nsurvexlegs":legsyear, "survexleglength":survexleglength}))
    legsbyexpo.reverse()

    #removing survexleg objects completely
    #survexlegs = models.SurvexLeg.objects.all()
    #totalsurvexlength = sum([survexleg.tape for survexleg in survexlegs])
    return render(request, 'experimental.html', { "nsurvexlegs":nimportlegs, "totalsurvexlength":totalsurvexlength, "addupsurvexlength":addupsurvexlength, "legsbyexpo":legsbyexpo })

@login_required_if_public
def newLogbookEntry(request, expeditionyear, pdate = None, pslug = None):
@@ -240,7 +311,7 @@ def newLogbookEntry(request, expeditionyear, pdate = None, pslug = None):
    tripForm = TripForm() # An unbound form
    personTripFormSet = PersonTripFormSet()

    return render_with_context(request, 'newlogbookentry.html', {
    return render(request, 'newlogbookentry.html', {
        'tripForm': tripForm,
        'personTripFormSet': personTripFormSet,

@@ -262,9 +333,8 @@ def delLogbookEntry(lbe):

def get_people(request, expeditionslug):
    exp = Expedition.objects.get(year = expeditionslug)
    return render_with_context(request,'options.html', {"items": [(pe.slug, pe.name) for pe in exp.personexpedition_set.all()]})
    return render(request,'options.html', {"items": [(pe.slug, pe.name) for pe in exp.personexpedition_set.all()]})

def get_logbook_entries(request, expeditionslug):
    exp = Expedition.objects.get(year = expeditionslug)
    return render_with_context(request,'options.html', {"items": [(le.slug, "%s - %s" % (le.date, le.title)) for le in exp.logbookentry_set.all()]})
    return render(request,'options.html', {"items": [(le.slug, "%s - %s" % (le.date, le.title)) for le in exp.logbookentry_set.all()]})

@@ -1,14 +1,14 @@
from troggle.core.models import Cave, Expedition, Person, LogbookEntry, PersonExpedition, PersonTrip, DPhoto, QM
#from troggle.core.forms import UploadFileForm
from troggle.core.models import Cave, Expedition, Person, LogbookEntry, PersonExpedition, PersonTrip, QM
#from troggle.core.forms import UploadFileForm, DPhoto
from django.conf import settings
from django import forms
from django.template import loader, Context
from django.db.models import Q
from django.shortcuts import render
import databaseReset
import re
from django.http import HttpResponse, HttpResponseRedirect
from django.core.urlresolvers import reverse
from utils import render_with_context
from troggle.core.models import *
from troggle.helper import login_required_if_public

@@ -21,18 +21,18 @@ def stats(request):
    statsDict['caveCount'] = int(Cave.objects.count())
    statsDict['personCount'] = int(Person.objects.count())
    statsDict['logbookEntryCount'] = int(LogbookEntry.objects.count())
    return render_with_context(request,'statistics.html', statsDict)
    return render(request,'statistics.html', statsDict)

def frontpage(request):
    if request.user.is_authenticated():
        return render_with_context(request,'tasks.html')
        return render(request,'tasks.html')

    expeditions = Expedition.objects.order_by("-year")
    logbookentry = LogbookEntry
    cave = Cave
    photo = DPhoto
    #photo = DPhoto
    from django.contrib.admin.templatetags import log
    return render_with_context(request,'frontpage.html', locals())
    return render(request,'frontpage.html', locals())

def todo(request):
    message = "no test message" #reverse('personn', kwargs={"name":"hkjhjh"})
@@ -45,7 +45,7 @@ def todo(request):

    expeditions = Expedition.objects.order_by("-year")
    totallogbookentries = LogbookEntry.objects.count()
    return render_with_context(request,'index.html', {'expeditions':expeditions, 'all':'all', 'totallogbookentries':totallogbookentries, "message":message})
    return render(request,'index.html', {'expeditions':expeditions, 'all':'all', 'totallogbookentries':totallogbookentries, "message":message})

def controlPanel(request):
@@ -55,34 +55,21 @@ def controlPanel(request):

    #importlist is mostly here so that things happen in the correct order.
    #http post data seems to come in an unpredictable order, so we do it this way.
    importlist=['reload_db', 'import_people', 'import_cavetab', 'import_logbooks', 'import_surveys', 'import_QMs']
    databaseReset.make_dirs()
    importlist=['reinit_db', 'import_people', 'import_caves', 'import_logbooks',
        'import_survexblks', 'import_QMs', 'import_survexpos', 'import_surveyscans', 'import_tunnelfiles']
    databaseReset.dirsredirect()
    for item in importlist:
        if item in request.POST:
            print "running"+ " databaseReset."+item+"()"
            exec "databaseReset."+item+"()"
            print("running"+ " databaseReset."+item+"()")
            exec("databaseReset."+item+"()")
            jobs_completed.append(item)
    else:
        if request.user.is_authenticated(): #The user is logged in, but is not a superuser.
            return render_with_context(request,'controlPanel.html', {'caves':Cave.objects.all(),'error':'You must be a superuser to use that feature.'})
            return render(request,'controlPanel.html', {'caves':Cave.objects.all(),'error':'You must be a superuser to use that feature.'})
        else:
            return HttpResponseRedirect(reverse('auth_login'))

    return render_with_context(request,'controlPanel.html', {'caves':Cave.objects.all(),'expeditions':Expedition.objects.all(),'jobs_completed':jobs_completed})

def downloadCavetab(request):
    from export import tocavetab
    response = HttpResponse(mimetype='text/csv')
    response['Content-Disposition'] = 'attachment; filename=CAVETAB2.CSV'
    tocavetab.writeCaveTab(response)
    return response

def downloadSurveys(request):
    from export import tosurveys
    response = HttpResponse(mimetype='text/csv')
    response['Content-Disposition'] = 'attachment; filename=Surveys.csv'
    tosurveys.writeCaveTab(response)
    return response
    return render(request,'controlPanel.html', {'caves':Cave.objects.all(),'expeditions':Expedition.objects.all(),'jobs_completed':jobs_completed})

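The `exec("databaseReset."+item+"()")` dispatch builds and executes source text from the POSTed job name. A safer equivalent (a sketch, not part of this changeset) looks the name up with `getattr`, so only functions that really exist on the module, and that appear in the whitelist, can run:

# Sketch only: dispatch without exec(); importlist remains the whitelist.
import databaseReset

def run_import_jobs(post_data, importlist):
    jobs_completed = []
    for item in importlist:
        if item in post_data:
            print("running databaseReset.%s()" % item)
            getattr(databaseReset, item)()   # AttributeError on unknown names
            jobs_completed.append(item)
    return jobs_completed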
def downloadLogbook(request,year=None,extension=None,queryset=None):

@@ -94,20 +81,19 @@ def downloadLogbook(request,year=None,extension=None,queryset=None):
        logbook_entries=queryset
        filename='logbook'
    else:
        response = HttpResponse(content_type='text/plain')
        return response(r"Error: Logbook downloader doesn't know what year you want")

    if 'year' in request.GET:
        year=request.GET['year']
    if 'extension' in request.GET:
        extension=request.GET['extension']

    if extension =='txt':
        response = HttpResponse(mimetype='text/plain')
        response = HttpResponse(content_type='text/plain')
        style='2008'
    elif extension == 'html':
        response = HttpResponse(mimetype='text/html')
        response = HttpResponse(content_type='text/html')
        style='2005'

    template='logbook'+style+'style.'+extension
@@ -124,11 +110,11 @@ def downloadQMs(request):
    try:
        cave=Cave.objects.get(kataster_number=request.GET['cave_id'])
    except Cave.DoesNotExist:
        cave=Cave.objects.get(name=cave_id)
        cave=Cave.objects.get(name=request.GET['cave_id'])

    from export import toqms

    response = HttpResponse(mimetype='text/csv')
    response = HttpResponse(content_type='text/csv')
    response['Content-Disposition'] = 'attachment; filename=qm.csv'
    toqms.writeQmTable(response,cave)
    return response
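`HttpResponse(mimetype=...)` was deprecated in Django 1.5 and removed in 1.7; `content_type` is the surviving keyword, as the replacements above show. The CSV-attachment pattern in miniature (the written row is a stand-in for `toqms.writeQmTable(response, cave)`):

# Sketch only: modern spelling of the CSV download responses above.
from django.http import HttpResponse

def download_csv_sketch(request):
    response = HttpResponse(content_type='text/csv')
    response['Content-Disposition'] = 'attachment; filename=qm.csv'
    response.write("number,grade\n1,A\n")   # illustrative payload
    return response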
@@ -136,7 +122,7 @@ def downloadQMs(request):
def ajax_test(request):
    post_text = request.POST['post_data']
    return HttpResponse("{'response_text': '"+post_text+" recieved.'}",
        mimetype="application/json")
        content_type="application/json")

def eyecandy(request):
    return

@@ -144,9 +130,9 @@ def eyecandy(request):
def ajax_QM_number(request):
    if request.method=='POST':
        cave=Cave.objects.get(id=request.POST['cave'])
        print cave
        print(cave)
        exp=Expedition.objects.get(pk=request.POST['year'])
        print exp
        print(exp)
        res=cave.new_QM_number(exp.year)

        return HttpResponse(res)

@@ -167,7 +153,7 @@ def logbook_entry_suggestions(request):
    #unwiki_QMs=re.findall(unwiki_QM_pattern,lbo.text)
    unwiki_QMs=[m.groupdict() for m in unwiki_QM_pattern.finditer(lbo.text)]

    print unwiki_QMs
    print(unwiki_QMs)
    for qm in unwiki_QMs:
        #try:
        if len(qm['year'])==2:
@@ -180,7 +166,7 @@ def logbook_entry_suggestions(request):
        try:
            lbo=LogbookEntry.objects.get(date__year=qm['year'],title__icontains="placeholder for QMs in")
        except:
            print "failed to get placeholder for year "+str(qm['year'])
            print("failed to get placeholder for year "+str(qm['year']))

        temp_QM=QM(found_by=lbo,number=qm['number'],grade=qm['grade'])
        temp_QM.grade=qm['grade']
@@ -188,7 +174,7 @@ def logbook_entry_suggestions(request):
        #except:
        #print 'failed'

    print unwiki_QMs
    print(unwiki_QMs)

    #wikilink_QMs=re.findall(wikilink_QM_pattern,lbo.text)
@@ -199,10 +185,10 @@ def logbook_entry_suggestions(request):
    #for qm in wikilink_QMs:
        #Try to look up the QM.

    print 'got 208'
    print('got 208')
    any_suggestions=True
    print 'got 210'
    return render_with_context(request,'suggestions.html',
    print('got 210')
    return render(request,'suggestions.html',
        {
        'unwiki_QMs':unwiki_QMs,
        'any_suggestions':any_suggestions
@@ -262,7 +248,7 @@ def newFile(request, pslug = None):
    # else:
    #     fileform = UploadFileForm() # An unbound form

    return render_with_context(request, 'editfile.html', {
    return render(request, 'editfile.html', {
        'fileForm': fileform,

    })

150
core/views_survex.py
Normal file → Executable file
@@ -1,6 +1,7 @@
from django import forms
from django.http import HttpResponseRedirect, HttpResponse
from django.shortcuts import render_to_response
from django.shortcuts import render_to_response, render
from django.core.context_processors import csrf
from django.http import HttpResponse, Http404
import re
import os
@@ -14,47 +15,76 @@ from parsers.people import GetPersonExpeditionNameLookup
import troggle.settings as settings
import parsers.survex

survextemplatefile = """; Locn: Totes Gebirge, Austria - Loser/Augst-Eck Plateau (kataster group 1623)
; Cave:
survextemplatefile = """; *** THIS IS A TEMPLATE FILE NOT WHAT YOU MIGHT BE EXPECTING ***

*** DO NOT SAVE THIS FILE WITHOUT RENAMING IT !! ***
;[Stuff in square brackets is example text to be replaced with real data,
; removing the square brackets]

*begin [surveyname]

*export [connecting stations]
; stations linked into other surveys (or likely to)
*export [1 8 12 34]

*title "area title"
*date 2999.99.99
*team Insts [Caver]
*team Insts [Caver]
*team Notes [Caver]
*instrument [set number]
; Cave:
; Area in cave/QM:
*title ""
*date [2040.07.04] ; <-- CHANGE THIS DATE
*team Insts [Fred Fossa]
*team Notes [Brenda Badger]
*team Pics [Luke Lynx]
*team Tape [Albert Aadvark]
*instrument [SAP #+Laser Tape/DistoX/Compass # ; Clino #]
; Calibration: [Where, readings]
*ref [2040#00] ; <-- CHANGE THIS TOO
; the #number is on the clear pocket containing the original notes

;ref.: 2009#NN
; if using a tape:
*calibrate tape +0.0 ; +ve if tape was too short, -ve if too long

; Centreline data
*data normal from to length bearing gradient ignoreall
[ 1 2 5.57 034.5 -12.8 ]

*data normal from to tape compass clino
1 2 3.90 298 -20
;-----------
;recorded station details (leave commented out)
;(NP=Nail Polish, LHW/RHW=Left/Right Hand Wall)
;Station Left Right Up Down Description
;[Red] nail varnish markings
[;1 0.8 0 5.3 1.6 ; NP on boulder. pt 23 on foo survey ]
[;2 0.3 1.2 6 1.2 ; NP '2' LHW ]
[;3 1.3 0 3.4 0.2 ; Rock on floor - not refindable ]

*data passage station left right up down ignoreall
1 [L] [R] [U] [D] comment

*end [surveyname]"""

def ReplaceTabs(stext):
    res = [ ]
    nsl = 0
    for s in re.split("(\t|\n)", stext):
        if s == "\t":
            res.append(" " * (4 - (nsl % 4)))
            nsl = 0
            continue
        if s == "\n":
            nsl = 0
        else:
            nsl += len(s)
        res.append(s)
    return "".join(res)

;LRUDs arranged into passage tubes
;new *data command for each 'passage',
;repeat stations and adjust numbers as needed
*data passage station left right up down
;[ 1 0.8 0 5.3 1.6 ]
;[ 2 0.3 1.2 6 1.2 ]
*data passage station left right up down
;[ 1 1.3 1.5 5.3 1.6 ]
;[ 3 2.4 0 3.4 0.2 ]

;-----------
;Question Mark List ;(leave commented-out)
; The nearest-station is the name of the survey and station which are nearest to
; the QM. The resolution-station is either '-' to indicate that the QM hasn't
; been checked; or the name of the survey and station which push that QM. If a
; QM doesn't go anywhere, set the resolution-station to be the same as the
; nearest-station. Include any relevant details of how to find or push the QM in
; the textual description.
;Serial number grade(A/B/C/X) nearest-station resolution-station description
;[ QM1 A surveyname.3 - description of QM ]
;[ QM2 B surveyname.5 - description of QM ]

;------------
;Cave description ;(leave commented-out)
;freeform text describing this section of the cave

*end [surveyname]
"""

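`ReplaceTabs` (unchanged context above) expands each tab to the next four-column stop and resets its column counter at every newline. A quick behavioural check, as a sketch:

# Sketch only: tabs pad to the next multiple-of-4 column, per line.
assert ReplaceTabs("ab\tc") == "ab  c"        # two spaces to reach column 4
assert ReplaceTabs("x\n\ty") == "x\n    y"    # counter resets after the newline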
class SvxForm(forms.Form):
@@ -62,15 +92,14 @@ class SvxForm(forms.Form):
    filename = forms.CharField(widget=forms.TextInput(attrs={"readonly":True}))
    datetime = forms.DateTimeField(widget=forms.TextInput(attrs={"readonly":True}))
    outputtype = forms.CharField(widget=forms.TextInput(attrs={"readonly":True}))
    code = forms.CharField(widget=forms.Textarea(attrs={"cols":150, "rows":18}))
    code = forms.CharField(widget=forms.Textarea(attrs={"cols":150, "rows":36}))

    def GetDiscCode(self):
        fname = settings.SURVEX_DATA + self.data['filename'] + ".svx"
        if not os.path.isfile(fname):
            return survextemplatefile
        fin = open(fname, "rb")
        svxtext = fin.read().decode("latin1") # unicode(a, "latin1")
        svxtext = ReplaceTabs(svxtext).strip()
        fin = open(fname, "rt")
        svxtext = fin.read().encode("utf8")
        fin.close()
        return svxtext

@@ -83,19 +112,28 @@ class SvxForm(forms.Form):
    def SaveCode(self, rcode):
        fname = settings.SURVEX_DATA + self.data['filename'] + ".svx"
        if not os.path.isfile(fname):
            # only save if appears valid
            if re.search(r"\[|\]", rcode):
                return "Error: clean up all []s from the text"
                return "Error: remove all []s from the text. They are only template guidance."
            mbeginend = re.search(r"(?s)\*begin\s+(\w+).*?\*end\s+(\w+)", rcode)
            if not mbeginend:
                return "Error: no begin/end block here"
            if mbeginend.group(1) != mbeginend.group(2):
                return "Error: mismatching beginend"

        fout = open(fname, "w")
        res = fout.write(rcode.encode("latin1"))
                return "Error: mismatching begin/end labels"

        # Make this create new survex folders if needed
        try:
            fout = open(fname, "wb")
        except IOError:
            pth = os.path.dirname(self.data['filename'])
            newpath = os.path.join(settings.SURVEX_DATA, pth)
            if not os.path.exists(newpath):
                os.makedirs(newpath)
            fout = open(fname, "wb")

        # javascript seems to insert CRLF on WSL1 whatever you say. So fix that:
        res = fout.write(rcode.replace("\r",""))
        fout.close()
        return "SAVED"
        return "SAVED ."

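The validation regex above pairs the first `*begin` with the nearest following `*end` (non-greedy `.*?`) and compares the two captured labels. Its accept/reject behaviour, as a sketch:

# Sketch only: matching *begin/*end labels pass, mismatched ones are rejected.
import re

good = "*begin gsh\n1 2 3.90 298 -20\n*end gsh"
bad  = "*begin gsh\n1 2 3.90 298 -20\n*end wrongname"
m = re.search(r"(?s)\*begin\s+(\w+).*?\*end\s+(\w+)", good)
assert m.group(1) == m.group(2)   # saved
m = re.search(r"(?s)\*begin\s+(\w+).*?\*end\s+(\w+)", bad)
assert m.group(1) != m.group(2)   # "Error: mismatching begin/end labels"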
    def Process(self):
        print("....\n\n\n....Processing\n\n\n")
@@ -103,7 +141,7 @@ class SvxForm(forms.Form):
        os.chdir(os.path.split(settings.SURVEX_DATA + self.data['filename'])[0])
        os.system(settings.CAVERN + " --log " + settings.SURVEX_DATA + self.data['filename'] + ".svx")
        os.chdir(cwd)
        fin = open(settings.SURVEX_DATA + self.data['filename'] + ".log", "rb")
        fin = open(settings.SURVEX_DATA + self.data['filename'] + ".log", "rt")
        log = fin.read()
        fin.close()
        log = re.sub("(?s).*?(Survey contains)", "\\1", log)
@@ -143,7 +181,6 @@ def svx(request, survex_file):
    form.data['code'] = rcode
    if "save" in rform.data:
        if request.user.is_authenticated():
            #print("sssavvving")
            message = form.SaveCode(rcode)
        else:
            message = "You do not have authority to save this file"
@@ -172,13 +209,14 @@ def svx(request, survex_file):
        'difflist': difflist,
        'logmessage':logmessage,
        'form':form}
    vmap.update(csrf(request))
    if outputtype == "ajax":
        return render_to_response('svxfiledifflistonly.html', vmap)
    return render_to_response('svxfile.html', vmap)

def svxraw(request, survex_file):
    svx = open(os.path.join(settings.SURVEX_DATA, survex_file+".svx"), "rb")
    return HttpResponse(svx, mimetype="text")
    svx = open(os.path.join(settings.SURVEX_DATA, survex_file+".svx"), "rt",encoding='utf8')
    return HttpResponse(svx, content_type="text")

# The cavern running function
@@ -192,21 +230,21 @@ def process(survex_file):
def threed(request, survex_file):
    process(survex_file)
    try:
        threed = open(settings.SURVEX_DATA + survex_file + ".3d", "rb")
        return HttpResponse(threed, mimetype="model/3d")
        threed = open(settings.SURVEX_DATA + survex_file + ".3d", "rt",encoding='utf8')
        return HttpResponse(threed, content_type="model/3d")
    except:
        log = open(settings.SURVEX_DATA + survex_file + ".log", "rb")
        return HttpResponse(log, mimetype="text")
        log = open(settings.SURVEX_DATA + survex_file + ".log", "rt",encoding='utf8')
        return HttpResponse(log, content_type="text")

def log(request, survex_file):
    process(survex_file)
    log = open(settings.SURVEX_DATA + survex_file + ".log", "rb")
    return HttpResponse(log, mimetype="text")
    log = open(settings.SURVEX_DATA + survex_file + ".log", "rt",encoding='utf8')
    return HttpResponse(log, content_type="text")

def err(request, survex_file):
    process(survex_file)
    err = open(settings.SURVEX_DATA + survex_file + ".err", "rb")
    return HttpResponse(err, mimetype="text")
    err = open(settings.SURVEX_DATA + survex_file + ".err", "rt",encoding='utf8')
    return HttpResponse(err, content_type="text")

581
databaseReset.py
Normal file → Executable file
@@ -1,189 +1,403 @@
from __future__ import (absolute_import, division,
                        print_function)
import os
import time
import timeit
import json

import settings
if os.geteuid() == 0:
    print("This script should be run as expo not root - quitting")
    exit()

os.environ['PYTHONPATH'] = settings.PYTHON_PATH
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'settings')

from django.core import management
from django.db import connection
from django.db import connection, close_old_connections
from django.contrib.auth.models import User
from django.http import HttpResponse
from django.core.urlresolvers import reverse
from troggle.core.models import Cave, Entrance
import troggle.flatpages.models

databasename=settings.DATABASES['default']['NAME']
from troggle.core.models import Cave, Entrance
import troggle.settings
import troggle.flatpages.models
import troggle.logbooksdump

# NOTE databaseReset.py is *imported* by views_other.py as it is used in the control panel
# presented there.

expouser=settings.EXPOUSER
expouserpass=settings.EXPOUSERPASS
expouseremail=settings.EXPOUSER_EMAIL

def reload_db():
def reinit_db():
    """Rebuild database from scratch. Deletes the file first if sqlite is used,
    otherwise it drops the database and creates it.
    """
    currentdbname = settings.DATABASES['default']['NAME']
    if settings.DATABASES['default']['ENGINE'] == 'django.db.backends.sqlite3':
        try:
            os.remove(databasename)
            os.remove(currentdbname)
        except OSError:
            pass
    else:
        cursor = connection.cursor()
        cursor.execute("DROP DATABASE %s" % databasename)
        cursor.execute("CREATE DATABASE %s" % databasename)
        cursor.execute("ALTER DATABASE %s CHARACTER SET=utf8" % databasename)
        cursor.execute("USE %s" % databasename)
        cursor.execute("DROP DATABASE %s" % currentdbname)
        cursor.execute("CREATE DATABASE %s" % currentdbname)
        cursor.execute("ALTER DATABASE %s CHARACTER SET=utf8" % currentdbname)
        cursor.execute("USE %s" % currentdbname)
    syncuser()

def syncuser():
    """Sync user - needed after reload
    """
    print("Synchronizing user")
    management.call_command('migrate', interactive=False)
    #management.call_command('syncdb', interactive=False)
    user = User.objects.create_user(expouser, expouseremail, expouserpass)
    user.is_staff = True
    user.is_superuser = True
    user.save()

def make_dirs():
    """Make directories that troggle requires"""
def dirsredirect():
    """Make directories that troggle requires and sets up page redirects
    """
    #should also deal with permissions here.
    if not os.path.isdir(settings.PHOTOS_ROOT):
        os.mkdir(settings.PHOTOS_ROOT)
    #if not os.path.isdir(settings.PHOTOS_ROOT):
    #    os.mkdir(settings.PHOTOS_ROOT)
    # for oldURL, newURL in [("indxal.htm", reverse("caveindex"))]:
    #     f = troggle.flatpages.models.Redirect(originalURL = oldURL, newURL = newURL)
    #     f.save()

def import_caves():
    import parsers.caves
    print("importing caves")
    parsers.caves.readcaves()
    import troggle.parsers.caves
    print("Importing Caves")
    troggle.parsers.caves.readcaves()

def import_people():
    import parsers.people
    parsers.people.LoadPersonsExpos()
    import troggle.parsers.people
    print("Importing People (folk.csv)")
    troggle.parsers.people.LoadPersonsExpos()

def import_logbooks():
    # The below line was causing errors I didn't understand (it said LOGFILE was a string), and I couldn't be bothered to figure
    # what was going on so I just catch the error with a try. - AC 21 May
    try:
        settings.LOGFILE.write('\nBegun importing logbooks at ' + time.asctime() +'\n'+'-'*60)
    except:
        pass

    import parsers.logbooks
    parsers.logbooks.LoadLogbooks()

def import_survex():
    import parsers.survex
    parsers.survex.LoadAllSurvexBlocks()
    parsers.survex.LoadPos()
    import troggle.parsers.logbooks
    print("Importing Logbooks")
    troggle.parsers.logbooks.LoadLogbooks()

def import_QMs():
    import parsers.QMs
    print("Importing QMs (old caves)")
    import troggle.parsers.QMs
    # import process itself runs on qm.csv in only 3 old caves, not the modern ones!

def import_survexblks():
    import troggle.parsers.survex
    print("Importing Survex Blocks")
    troggle.parsers.survex.LoadAllSurvexBlocks()

def import_surveys():
    import parsers.surveys
    parsers.surveys.parseSurveys(logfile=settings.LOGFILE)
def import_survexpos():
    import troggle.parsers.survex
    print("Importing Survex x/y/z Positions")
    troggle.parsers.survex.LoadPos()

def import_surveyimgs():
    """This appears to store data in unused objects. The code is kept
    for future re-working to manage progress against notes, plans and elevs.
    """
    #import troggle.parsers.surveys
    print("NOT Importing survey images")
    #troggle.parsers.surveys.parseSurveys(logfile=settings.LOGFILE)

def import_surveyscans():
    import parsers.surveys
    parsers.surveys.LoadListScans()
    import troggle.parsers.surveys
    print("Importing Survey Scans")
    troggle.parsers.surveys.LoadListScans()

def import_tunnelfiles():
    import parsers.surveys
    parsers.surveys.LoadTunnelFiles()
    import troggle.parsers.surveys
    print("Importing Tunnel files")
    troggle.parsers.surveys.LoadTunnelFiles()

# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# These functions moved to a different file - not used currently.
#import logbooksdump
#def import_auto_logbooks():
#def dumplogbooks():

def reset():
    """ Wipe the troggle database and import everything from legacy data
#def writeCaves():
# Writes out all cave and entrance HTML files to
# folder specified in settings.CAVEDESCRIPTIONS
# for cave in Cave.objects.all():
#     cave.writeDataFile()
# for entrance in Entrance.objects.all():
#     entrance.writeDataFile()
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

class JobQueue():
    """A list of import operations to run. Always reports profile times
    in the same order.
    """
    reload_db()
    make_dirs()
    pageredirects()
    import_caves()
    import_people()
    import_surveyscans()
    import_survex()
    import_logbooks()
    import_QMs()
    try:
        import_tunnelfiles()
    except:
        print("Tunnel files parser broken.")

    import_surveys()
    def __init__(self,run):
        self.runlabel = run
        self.queue = [] # tuples of (jobname, jobfunction)
        self.results = {}
        self.results_order=[
            "date","runlabel","reinit", "caves", "people",
            "logbooks", "QMs", "scans", "survexblks", "survexpos",
            "tunnel", "surveyimgs", "test", "dirsredirect", "syncuser" ]
        for k in self.results_order:
            self.results[k]=[]
        self.tfile = "import_profile.json"
        self.htmlfile = "profile.html" # for HTML results table. Not yet done.

    #Adding elements to queue - enqueue
    def enq(self,label,func):
        self.queue.append((label,func))
        return True

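The diff interleaves the old `reset()` body with the new `JobQueue` class, so the driver code is not visible in this hunk. The intended shape, as a sketch using only the names defined above:

# Sketch only: build a queue of labelled import jobs and run them with profiling.
jq = JobQueue("caves-only")      # runlabel identifies this run in import_profile.json
jq.enq("reinit", reinit_db)      # labels must appear in results_order
jq.enq("caves", import_caves)
jq.enq("people", import_people)
jq.run()                         # times each job and saves the profile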
def import_auto_logbooks():
    import parsers.logbooks
    import os
    for pt in troggle.core.models.PersonTrip.objects.all():
        pt.delete()
    for lbe in troggle.core.models.LogbookEntry.objects.all():
        lbe.delete()
    for expedition in troggle.core.models.Expedition.objects.all():
        directory = os.path.join(settings.EXPOWEB,
                                 "years",
                                 expedition.year,
                                 "autologbook")
        for root, dirs, filenames in os.walk(directory):
            for filename in filenames:
                print(os.path.join(root, filename))
                parsers.logbooks.parseAutoLogBookEntry(os.path.join(root, filename))

    #Removing the last element from the queue - dequeue
    # def deq(self):
    #     if len(self.queue)>0:
    #         return self.queue.pop()
    #     return ("Queue Empty!")

#Temporary function until definative source of data transfered.
from django.template.defaultfilters import slugify
from django.template import Context, loader
def dumplogbooks():
    def get_name(pe):
        if pe.nickname:
            return pe.nickname
        else:
            return pe.person.first_name
    for lbe in troggle.core.models.LogbookEntry.objects.all():
        dateStr = lbe.date.strftime("%Y-%m-%d")
        directory = os.path.join(settings.EXPOWEB,
                                 "years",
                                 lbe.expedition.year,
                                 "autologbook")
        if not os.path.isdir(directory):
            os.mkdir(directory)
        filename = os.path.join(directory,
                                dateStr + "." + slugify(lbe.title)[:50] + ".html")
        if lbe.cave:
            print(lbe.cave.reference())
            trip = {"title": lbe.title, "html":lbe.text, "cave": lbe.cave.reference(), "caveOrLocation": "cave"}
        else:
            trip = {"title": lbe.title, "html":lbe.text, "location":lbe.place, "caveOrLocation": "location"}
        pts = [pt for pt in lbe.persontrip_set.all() if pt.personexpedition]
        persons = [{"name": get_name(pt.personexpedition), "TU": pt.time_underground, "author": pt.is_logbook_entry_author} for pt in pts]
        f = open(filename, "wb")
        template = loader.get_template('dataformat/logbookentry.html')
        context = Context({'trip': trip,
                           'persons': persons,
                           'date': dateStr,
                           'expeditionyear': lbe.expedition.year})
        output = template.render(context)
        f.write(unicode(output).encode( "utf-8" ))

    def loadprofiles(self):
        """Load timings for previous runs from file
        """
        if os.path.isfile(self.tfile):
            try:
                f = open(self.tfile, "r")
                data = json.load(f)
                for j in data:
                    self.results[j] = data[j]
            except:
                print("FAILURE parsing JSON file %s" % (self.tfile))
                # Python bug: https://github.com/ShinNoNoir/twitterwebsearch/issues/12
            f.close()
        for j in self.results_order:
            self.results[j].append(None) # append a placeholder
        return True

    def saveprofiles(self):
        with open(self.tfile, 'w') as f:
            json.dump(self.results, f)
        return True

    def memdumpsql(self):
        djconn = django.db.connection
        from dump import _iterdump
        with open('memdump.sql', 'w') as f:
            for line in _iterdump(djconn):
                f.write('%s\n' % line.encode("utf8"))
        return True

def pageredirects():
    for oldURL, newURL in [("indxal.htm", reverse("caveindex"))]:
        f = troggle.flatpages.models.Redirect(originalURL = oldURL, newURL = newURL)
        f.save()
def runqonce(self):
|
||||
"""Run all the jobs in the queue provided - once
|
||||
"""
|
||||
|
||||
print("** Running job ", self.runlabel)
|
||||
jobstart = time.time()
|
||||
self.results["date"].pop()
|
||||
self.results["date"].append(jobstart)
|
||||
self.results["runlabel"].pop()
|
||||
self.results["runlabel"].append(self.runlabel)
|
||||
|
||||
for i in self.queue:
|
||||
start = time.time()
|
||||
i[1]() # looks ugly but invokes function passed in the second item in the tuple
|
||||
duration = time.time()-start
|
||||
print("\n*- Ended \"", i[0], "\" %.1f seconds" % duration)
|
||||
self.results[i[0]].pop() # the null item
|
||||
self.results[i[0]].append(duration)
|
||||
|
||||
|
||||
jobend = time.time()
|
||||
jobduration = jobend-jobstart
|
||||
print("** Ended job %s - %.1f seconds total." % (self.runlabel,jobduration))
|
||||
|
||||
return True
|
||||
|
||||
|
||||
def run(self):
    """First runs all the jobs in the queue against a scratch in-memory db,
    then re-runs the import against the db specified in settings.py.
    Default behaviour is to skip the in-memory phase.
    When MySQL is the db, the in-memory phase crashes as MySQL does not properly
    relinquish some kind of db connection (not fixed yet).
    """
    self.loadprofiles()
    # save db settings for later
    dbengine = settings.DATABASES['default']['ENGINE']
    dbname = settings.DATABASES['default']['NAME']
    dbdefault = settings.DATABASES['default']

    skipmem = False
    if self.runlabel:
        if self.runlabel == "":
            skipmem = True
        elif self.runlabel[0:2] == "F-":
            skipmem = True
        else:
            skipmem = True  # NB all three branches set skipmem, so any runlabel at all skips the :memory: phase

    print("-- ", settings.DATABASES['default']['NAME'], settings.DATABASES['default']['ENGINE'])
    #print "-- DATABASES.default", settings.DATABASES['default']

    if dbname == ":memory:":
        # just run, and save the sql file
        self.runqonce()
        self.memdumpsql()  # saved contents of scratch db, could be imported later..
        self.saveprofiles()
    elif skipmem:
        self.runqonce()
        self.saveprofiles()
    else:
        django.db.close_old_connections()  # needed if MySQL running?
        # run all the imports through :memory: first
        settings.DATABASES['default']['ENGINE'] = 'django.db.backends.sqlite3'
        settings.DATABASES['default']['NAME'] = ":memory:"
        settings.DATABASES['default'] = {'ENGINE': 'django.db.backends.sqlite3',
                                         'AUTOCOMMIT': True,
                                         'ATOMIC_REQUESTS': False,
                                         'NAME': ':memory:',
                                         'CONN_MAX_AGE': 0,
                                         'TIME_ZONE': 'UTC',
                                         'OPTIONS': {},
                                         'HOST': '',
                                         'USER': '',
                                         'TEST': {'COLLATION': None, 'CHARSET': None, 'NAME': None, 'MIRROR': None},
                                         'PASSWORD': '',
                                         'PORT': ''}

        print("-- ", settings.DATABASES['default']['NAME'], settings.DATABASES['default']['ENGINE'])
        #print("-- DATABASES.default", settings.DATABASES['default'])

        # but because the user may be expecting to add this to a db with lots of tables already there,
        # the jobqueue may not start from scratch so we need to initialise the db properly first,
        # because we are using an empty :memory: database.
        # But initiating twice crashes it; so be sure to do it once only.

        # Damn. syncdb() is still calling MySQL somehow **conn_params not sqlite3. So crashes on expo server.
        if ("reinit", reinit_db) not in self.queue:
            reinit_db()
        if ("dirsredirect", dirsredirect) not in self.queue:
            dirsredirect()
        if ("caves", import_caves) not in self.queue:
            import_caves()  # sometime extract the initialising code from this and put in reinit...
        if ("people", import_people) not in self.queue:
            import_people()  # sometime extract the initialising code from this and put in reinit...

        django.db.close_old_connections()  # maybe not needed here

        self.runqonce()
        self.memdumpsql()
        self.showprofile()

        # restore the original db and import again
        # if we wanted to, we could re-import the SQL generated in the first pass to be
        # blazing fast. But for the present just re-import the lot.
        settings.DATABASES['default'] = dbdefault
        settings.DATABASES['default']['ENGINE'] = dbengine
        settings.DATABASES['default']['NAME'] = dbname
        print("-- ", settings.DATABASES['default']['NAME'], settings.DATABASES['default']['ENGINE'])

        django.db.close_old_connections()  # maybe not needed here
        for j in self.results_order:
            self.results[j].pop()  # throw away results from :memory: run
            self.results[j].append(None)  # append a placeholder

        django.db.close_old_connections()  # magic rune. works. found by looking in django.db__init__.py
        #django.setup()  # should this be needed?

        self.runqonce()  # crashes because it thinks it has no migrations to apply, when it does.
        self.saveprofiles()

    return True
def showprofile(self):
    """Prints out the time it took to run the jobqueue"""
    for k in self.results_order:
        if k == "dirsredirect":
            break
        if k == "surveyimgs":
            break
        elif k == "syncuser":
            break
        elif k == "test":
            break
        elif k == "date":
            print("  days ago ", end=' ')
        else:
            print('%10s (s)' % k, end=' ')
        percen = 0
        r = self.results[k]

        for i in range(len(r)):
            if k == "runlabel":
                if r[i]:
                    rp = r[i]
                else:
                    rp = " - "
                print('%8s' % rp, end=' ')
            elif k == "date":
                # Calculate dates as days before present
                if r[i]:
                    if i == len(r) - 1:
                        print("    this", end=' ')
                    else:
                        # prints one place to the left of where you expect
                        if r[len(r) - 1]:
                            s = r[i] - r[len(r) - 1]
                        else:
                            s = 0
                        days = (s) / (24 * 60 * 60)
                        print('%8.2f' % days, end=' ')
            elif r[i]:
                print('%8.1f' % r[i], end=' ')
                if i == len(r) - 1 and r[i - 1]:
                    percen = 100 * (r[i] - r[i - 1]) / r[i - 1]
                    if abs(percen) > 0.1:
                        print('%8.1f%%' % percen, end=' ')
            else:
                print("       - ", end=' ')
        print("")
    print("\n")
    return True
def writeCaves():
    for cave in Cave.objects.all():
        cave.writeDataFile()
    for entrance in Entrance.objects.all():
        entrance.writeDataFile()
def usage():
    print("""Usage is 'python databaseReset.py <command>'
    print("""Usage is 'python databaseReset.py <command> [runlabel]'
          where command is:
         reset - this is normal usage, clear database and reread everything
         desc
         caves - read in the caves
         logbooks - read in the logbooks
         autologbooks
         dumplogbooks
         people
         QMs - read in the QM files
         resetend
         scans - read in the scanned surveynotes
         survex - read in the survex files
         survexpos
         surveys
         tunnel - read in the Tunnel files
         writeCaves
         test       - testing... imports people and prints profile. Deletes nothing.
         profile    - print the profile from previous runs. Import nothing.

         reset      - normal usage: clear database and reread everything from files - time-consuming
         caves      - read in the caves (must run first after reset)
         people     - read in the people from folk.csv (must run before logbooks)
         logbooks   - read in the logbooks
         QMs        - read in the QM csv files (older caves only)
         scans      - the survey scans in all the wallets (must run before survex)
         survex     - read in the survex files - all the survex blocks but not the x/y/z positions
         survexpos  - set the x/y/z positions for entrances and fixed points

         tunnel     - read in the Tunnel files - which scans the survey scans too

         reinit     - clear database (delete everything) and make empty tables. Import nothing.
         syncuser   - needed after reloading database from SQL backup
         autologbooks - Not used. read in autologbooks (what are these?)
         dumplogbooks - Not used. write out autologbooks (not working?)
         surveyimgs - Not used. read in scans by-expo, must run after "people".

         and [runlabel] is an optional string identifying this run of the script
         in the stored profiling data 'import-profile.json'
         if [runlabel] is absent or begins with "F-" then it will skip the :memory: pass

         caves and logbooks must be run on an empty db before the others as they
         set up db tables used by the others.

         the in-memory phase is on an empty db, so always runs reinit, caves & people for this phase
         """)
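The help text above implies invocations like these (sketches only; '2020-test' is just an arbitrary runlabel, any string would do):

    python databaseReset.py reset 2020-test    # full reimport, timings stored under the label '2020-test'
    python databaseReset.py logbooks           # rerun just the logbooks import
    python databaseReset.py profile            # show timings from previous runs, import nothing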
if __name__ == "__main__":

@@ -191,57 +405,66 @@ if __name__ == "__main__":
    import sys
    import django
    django.setup()
    if "desc" in sys.argv:
        resetdesc()
    elif "scans" in sys.argv:
        import_surveyscans()

    if len(sys.argv) > 2:
        runlabel = sys.argv[len(sys.argv) - 1]
    else:
        runlabel = None

    jq = JobQueue(runlabel)

    if len(sys.argv) == 1:
        usage()
        exit()
    elif "test" in sys.argv:
        jq.enq("caves", import_caves)
        jq.enq("people", import_people)
    elif "caves" in sys.argv:
        reload_db()
        make_dirs()
        pageredirects()
        import_caves()
    elif "people" in sys.argv:
        import_people()
    elif "QMs" in sys.argv:
        import_QMs()
    elif "tunnel" in sys.argv:
        import_tunnelfiles()
    elif "reset" in sys.argv:
        reset()
    elif "resetend" in sys.argv:
        #import_logbooks()
        import_QMs()
        try:
            import_tunnelfiles()
        except:
            print("Tunnel files parser broken.")
        import_surveys()
        import_descriptions()
        parse_descriptions()
    elif "survex" in sys.argv:
        #management.call_command('syncdb', interactive=False)  # this sets the path so that import settings works in import_survex
        import_survex()
    elif "survexpos" in sys.argv:
        management.call_command('syncdb', interactive=False)  # this sets the path so that import settings works in import_survex
        import parsers.survex
        parsers.survex.LoadPos()
        jq.enq("caves", import_caves)
    elif "logbooks" in sys.argv:
        management.call_command('syncdb', interactive=False)  # this sets the path so that import settings works in import_survex
        import_logbooks()
    elif "autologbooks" in sys.argv:
        jq.enq("logbooks", import_logbooks)
    elif "people" in sys.argv:
        jq.enq("people", import_people)
    elif "QMs" in sys.argv:
        jq.enq("QMs", import_QMs)
    elif "reset" in sys.argv:
        jq.enq("reinit", reinit_db)
        jq.enq("dirsredirect", dirsredirect)
        jq.enq("caves", import_caves)
        jq.enq("people", import_people)
        jq.enq("scans", import_surveyscans)
        jq.enq("logbooks", import_logbooks)
        jq.enq("QMs", import_QMs)
        jq.enq("tunnel", import_tunnelfiles)
        #jq.enq("survexblks", import_survexblks)
        #jq.enq("survexpos", import_survexpos)
    elif "scans" in sys.argv:
        jq.enq("scans", import_surveyscans)
    elif "survex" in sys.argv:
        jq.enq("survexblks", import_survexblks)
    elif "survexpos" in sys.argv:
        jq.enq("survexpos", import_survexpos)
    elif "tunnel" in sys.argv:
        jq.enq("tunnel", import_tunnelfiles)
    elif "surveyimgs" in sys.argv:
        jq.enq("surveyimgs", import_surveyimgs)  # imports into tables which are never read
    elif "autologbooks" in sys.argv:  # untested in 2020
        import_auto_logbooks()
    elif "dumplogbooks" in sys.argv:
    elif "dumplogbooks" in sys.argv:  # untested in 2020
        dumplogbooks()
    elif "writeCaves" in sys.argv:
        writeCaves()
    elif "surveys" in sys.argv:
        import_surveys()
    # elif "writecaves" in sys.argv:  # untested in 2020 - will overwrite input files!!
    #     writeCaves()
    elif "profile" in sys.argv:
        jq.loadprofiles()
        jq.showprofile()
        exit()
    elif "help" in sys.argv:
        usage()
        exit()
    else:
        print("%s not recognised" % sys.argv)
        usage()
        print(("%s not recognised as a command." % sys.argv[1]))
        exit()

    jq.run()
    jq.showprofile()
85 debian/serversetup vendored Normal file
@@ -0,0 +1,85 @@
Instructions for setting up new expo debian server/VM
For Debian Stretch, June 2019.

adduser expo
apt install openssh-server mosh tmux mc zile emacs-nox most ncdu
apt install python-django apache2 mysql-server survex make rsync
apt install libjs-openlayers make
apt install git mercurial mercurial-server?

for boe:
apt install libcgi-session-perl libcrypt-passwdmd5-perl libfile-slurp-perl libgit-wrapper-perl libhtml-template-perl libhtml-template-pro-perl libmime-lite-perl libtext-password-pronounceable-perl libtime-parsedate-perl libuuid-tiny-perl libcrypt-cracklib-perl

obsolete packages:
bins (move to jigl?) (for photos)
python-django 1.7
backports: survex therion
not-packaged: caveview

make these dirs available at top documentroot (e.g. via symlinks, as sketched below):
cuccfiles
expofiles
loser (link to repo)
tunneldata (link to repo)
troggle (link to repo)
expoweb (link to repo)
boc/boe
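For example, as symlinks (a sketch only; the repo checkout locations and the documentroot path are assumptions, adjust to the actual layout):

ln -s /home/expo/loser /var/www/loser
ln -s /home/expo/expoweb /var/www/expoweb
ln -s /home/expo/tunneldata /var/www/tunneldata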
config
containing:

setup apache configs for cucc and expo
# disable default website
a2dissite 000-default
a2ensite cucc
a2ensite expo
a2enmod cgid

Boe config:
Alias /boe /home/expo/boe/boc/boc.pl
<Directory /home/expo/boe/boc>
    AddHandler cgi-script .pl
    SetHandler cgi-script
    Options +ExecCGI
    Require all granted
</Directory>
And remember to set both the program and data dirs to be owned by
www-data:www-data
(optionally make file group read/write by treasurer account)
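For instance, assuming boe lives under /home/expo/boe as in the Alias above (an assumed path):

chown -R www-data:www-data /home/expo/boe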
create empty repo by clicking create in boe interface
then set names in 'settings'

Set up mysql (as root)
mysql -p
CREATE DATABASE troggle;
GRANT ALL PRIVILEGES ON troggle.* TO 'expo'@'localhost' IDENTIFIED BY 'somepassword';
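(Optionally, on MySQL versions where the grant tables are not reloaded automatically, this is harmless and forces a reload:)
FLUSH PRIVILEGES;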
install django:
sudo apt install python-django python-django-registration python-django-imagekit python-django-tinymce fonts-freefont-ttf libapache2-mod-wsgi

python-django-imagekit comes from https://salsa.debian.org/python-team/modules/python-django-imagekit
python-django-tinymce comes from https://salsa.debian.org/python-team/modules/python-django-tinymce
(both modified for stretch/python2). packages under /home/wookey/packages/

need fonts-freefont-ttf (to have truetype freesans available for troggle via PIL)
need libapache2-mod-wsgi for apache wsgi support.

On stretch the packaged django (1.10) is no use, so get rid of that:
apt remove python3-django python-django python-django-common python-django-doc

Then replace with django 1.7 (needs to be built for stretch):
apt install python-django python-django-common python-django-doc
apt install python-django-registration python-django-imagekit python-django-tinymce

then hold them to stop them being upgraded by unattended upgrades:
echo "python-django hold" | sudo dpkg --set-selections
echo "python-django-common hold" | sudo dpkg --set-selections
echo "python-django-doc hold" | sudo dpkg --set-selections

# troggle has to have a writable logfile, otherwise the website explodes:
# 500 error on the server, and the apache error log fills with non-reentrant errors
create /var/log/troggle/troggle.log
chown www-data:adm /var/log/troggle/troggle.log
chmod 660 /var/log/troggle/troggle.log
@@ -40,7 +40,6 @@ mkdir -p expofiles/surveyscans

To start the containers run
```bash
$ cd ~/expo/troggle/docker
$ docker-compose up
```
You will now have a working troggle, but with no data. To import the data you need to access the container; run
@@ -6,3 +6,4 @@ django-imagekit
Image
django-tinymce==2.7.0
smartencoding
unidecode
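If pip rather than the Debian packages is used, these can be installed in one go (a sketch; it assumes this diff is the project's requirements.txt, whose filename is not shown in the hunk header):

pip install -r requirements.txt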
69 dump.py Normal file
@@ -0,0 +1,69 @@
# Mimic the sqlite3 console shell's .dump command
# Author: Paul Kippes <kippesp@gmail.com>

# Every identifier in sql is quoted based on a comment in sqlite
# documentation "SQLite adds new keywords from time to time when it
# takes on new features. So to prevent your code from being broken by
# future enhancements, you should normally quote any identifier that
# is an English language word, even if you do not have to."

def _iterdump(connection):
    """
    Returns an iterator to the dump of the database in an SQL text format.
    Used to produce an SQL dump of the database. Useful to save an in-memory
    database for later restoration. This function should not be called
    directly but instead called from the Connection method, iterdump().
    """

    cu = connection.cursor()
    yield('BEGIN TRANSACTION;')

    # sqlite_master table contains the SQL CREATE statements for the database.
    q = """
        SELECT "name", "type", "sql"
        FROM "sqlite_master"
        WHERE "sql" NOT NULL AND
              "type" == 'table'
        ORDER BY "name"
        """
    schema_res = cu.execute(q)
    for table_name, type, sql in schema_res.fetchall():
        if table_name == 'sqlite_sequence':
            yield('DELETE FROM "sqlite_sequence";')
        elif table_name == 'sqlite_stat1':
            yield('ANALYZE "sqlite_master";')
        elif table_name.startswith('sqlite_'):
            continue
        # NOTE: Virtual table support not implemented
        #elif sql.startswith('CREATE VIRTUAL TABLE'):
        #    qtable = table_name.replace("'", "''")
        #    yield("INSERT INTO sqlite_master(type,name,tbl_name,rootpage,sql)"\
        #          "VALUES('table','{0}','{0}',0,'{1}');".format(
        #          qtable,
        #          sql.replace("''")))
        else:
            yield('{0};'.format(sql))

        # Build the insert statement for each row of the current table
        table_name_ident = table_name.replace('"', '""')
        res = cu.execute('PRAGMA table_info("{0}")'.format(table_name_ident))
        column_names = [str(table_info[1]) for table_info in res.fetchall()]
        q = """SELECT 'INSERT INTO "{0}" VALUES({1})' FROM "{0}";""".format(
            table_name_ident,
            ",".join("""'||quote("{0}")||'""".format(col.replace('"', '""')) for col in column_names))
        query_res = cu.execute(q)
        for row in query_res:
            yield(row[0])  # '{0}'.format(row[0]) had unicode errors

    # Now when the type is 'index', 'trigger', or 'view'
    q = """
        SELECT "name", "type", "sql"
        FROM "sqlite_master"
        WHERE "sql" NOT NULL AND
              "type" IN ('index', 'trigger', 'view')
        """
    schema_res = cu.execute(q)
    for name, type, sql in schema_res.fetchall():
        yield('{0};'.format(sql))

    yield('COMMIT;')
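A minimal sketch of driving _iterdump by hand, outside troggle (hypothetical standalone use; memdumpsql above feeds it the live django.db.connection instead):

import sqlite3
from dump import _iterdump

conn = sqlite3.connect(":memory:")
conn.execute("CREATE TABLE cave (name TEXT)")
conn.execute("INSERT INTO cave VALUES ('Steinbrueckenhoehle')")
with open("memdump.sql", "w") as f:
    for line in _iterdump(conn):  # yields BEGIN, the CREATEs and INSERTs, then COMMIT
        f.write("%s\n" % line)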
@@ -1,6 +1,6 @@
import troggle.settings as settings
from troggle.helper import login_required_if_public
from utils import render_with_context
from django.shortcuts import render

from django.http import HttpResponse, HttpResponseRedirect, Http404
from django.core.urlresolvers import reverse
@@ -38,7 +38,7 @@ def flatpage(request, path):
        print("flat path noinfo", path)
        return HttpResponseRedirect(reverse("auth_login") + '?next=%s' % request.path)

    if path.endswith("/") or path == "":
    if path.endswith("/") or path == "":
        try:
            o = open(os.path.normpath(settings.EXPOWEB + path + "index.html"), "rb")
            path = path + "index.html"
@@ -47,13 +47,13 @@ def flatpage(request, path):
            o = open(os.path.normpath(settings.EXPOWEB + path + "index.htm"), "rb")
            path = path + "index.htm"
        except IOError:
            return render_with_context(request, 'pagenotfound.html', {'path': path})
            return render(request, 'pagenotfound.html', {'path': path})
    else:
        try:
            filetobeopened = os.path.normpath(settings.EXPOWEB + path)
            o = open(filetobeopened, "rb")
        except IOError:
            return render_with_context(request, 'pagenotfound.html', {'path': path})
            return render(request, 'pagenotfound.html', {'path': path})
    if path.endswith(".htm") or path.endswith(".html"):
        html = o.read()

@@ -67,15 +67,24 @@ def flatpage(request, path):
        title, = m.groups()
    else:
        title = ""
    m = re.search(r"<meta([^>]*)noedit", head, re.DOTALL + re.IGNORECASE)
    if m:
        editable = False
    else:
        editable = True

    has_menu = False
    menumatch = re.match('(.*)<div id="menu">', body, re.DOTALL + re.IGNORECASE)
    if menumatch:
        has_menu = True
    menumatch = re.match('(.*)<ul id="links">', body, re.DOTALL + re.IGNORECASE)
    if menumatch:
        has_menu = True
        #body, = menumatch.groups()
    if re.search(r"iso-8859-1", html):
        body = unicode(body, "iso-8859-1")
    body = body.strip()
    return render_with_context(request, 'flatpage.html', {'editable': True, 'path': path, 'title': title, 'body': body, 'homepage': (path == "index.htm"), 'has_menu': has_menu})
    return render(request, 'flatpage.html', {'editable': editable, 'path': path, 'title': title, 'body': body, 'homepage': (path == "index.htm"), 'has_menu': has_menu})
    else:
        return HttpResponse(o.read(), content_type=getmimetype(path))

@@ -160,9 +169,9 @@ def editflatpage(request, path):
        flatpageForm = FlatPageForm({"html": body, "title": title})
    else:
        flatpageForm = FlatPageForm()
    return render_with_context(request, 'editflatpage.html', {'path': path, 'form': flatpageForm, })
    return render(request, 'editflatpage.html', {'path': path, 'form': flatpageForm, })

class FlatPageForm(forms.Form):
    title = forms.CharField(widget=forms.TextInput(attrs={'size':'60'}))

    html = forms.CharField(widget=forms.Textarea())
    html = forms.CharField(widget=TinyMCE(attrs={'cols': 80, 'rows': 20}))
13 imagekit/__init__.py Normal file
@@ -0,0 +1,13 @@
"""
|
||||
|
||||
Django ImageKit
|
||||
|
||||
Author: Justin Driscoll <justin.driscoll@gmail.com>
|
||||
Version: 0.2
|
||||
|
||||
"""
|
||||
VERSION = "0.2"
|
||||
|
||||
|
||||
|
||||
|
||||
21 imagekit/defaults.py Normal file
@@ -0,0 +1,21 @@
""" Default ImageKit configuration """
|
||||
|
||||
from imagekit.specs import ImageSpec
|
||||
from imagekit import processors
|
||||
|
||||
class ResizeThumbnail(processors.Resize):
|
||||
width = 100
|
||||
height = 50
|
||||
crop = True
|
||||
|
||||
class EnhanceSmall(processors.Adjustment):
|
||||
contrast = 1.2
|
||||
sharpness = 1.1
|
||||
|
||||
class SampleReflection(processors.Reflection):
|
||||
size = 0.5
|
||||
background_color = "#000000"
|
||||
|
||||
class DjangoAdminThumbnail(ImageSpec):
|
||||
access_as = 'admin_thumbnail'
|
||||
processors = [ResizeThumbnail, EnhanceSmall, SampleReflection]
|
||||
17 imagekit/lib.py Normal file
@@ -0,0 +1,17 @@
# Required PIL classes may or may not be available from the root namespace
# depending on the installation method used.
try:
    import Image
    import ImageFile
    import ImageFilter
    import ImageEnhance
    import ImageColor
except ImportError:
    try:
        from PIL import Image
        from PIL import ImageFile
        from PIL import ImageFilter
        from PIL import ImageEnhance
        from PIL import ImageColor
    except ImportError:
        raise ImportError('ImageKit was unable to import the Python Imaging Library. Please confirm it\'s installed and available on your current Python path.')
1 imagekit/management/__init__.py Normal file
@@ -0,0 +1 @@

1 imagekit/management/commands/__init__.py Normal file
@@ -0,0 +1 @@
134 imagekit/processors.py Normal file
@@ -0,0 +1,134 @@
""" Imagekit Image "ImageProcessors"
|
||||
|
||||
A processor defines a set of class variables (optional) and a
|
||||
class method named "process" which processes the supplied image using
|
||||
the class properties as settings. The process method can be overridden as well allowing user to define their
|
||||
own effects/processes entirely.
|
||||
|
||||
"""
|
||||
from imagekit.lib import *
|
||||
|
||||
class ImageProcessor(object):
|
||||
""" Base image processor class """
|
||||
@classmethod
|
||||
def process(cls, image, obj=None):
|
||||
return image
|
||||
|
||||
|
||||
class Adjustment(ImageProcessor):
|
||||
color = 1.0
|
||||
brightness = 1.0
|
||||
contrast = 1.0
|
||||
sharpness = 1.0
|
||||
|
||||
@classmethod
|
||||
def process(cls, image, obj=None):
|
||||
for name in ['Color', 'Brightness', 'Contrast', 'Sharpness']:
|
||||
factor = getattr(cls, name.lower())
|
||||
if factor != 1.0:
|
||||
image = getattr(ImageEnhance, name)(image).enhance(factor)
|
||||
return image
|
||||
|
||||
|
||||
class Reflection(ImageProcessor):
|
||||
background_color = '#FFFFFF'
|
||||
size = 0.0
|
||||
opacity = 0.6
|
||||
|
||||
@classmethod
|
||||
def process(cls, image, obj=None):
|
||||
# convert bgcolor string to rgb value
|
||||
background_color = ImageColor.getrgb(cls.background_color)
|
||||
# copy orignial image and flip the orientation
|
||||
reflection = image.copy().transpose(Image.FLIP_TOP_BOTTOM)
|
||||
# create a new image filled with the bgcolor the same size
|
||||
background = Image.new("RGB", image.size, background_color)
|
||||
# calculate our alpha mask
|
||||
start = int(255 - (255 * cls.opacity)) # The start of our gradient
|
||||
steps = int(255 * cls.size) # the number of intermedite values
|
||||
increment = (255 - start) / float(steps)
|
||||
mask = Image.new('L', (1, 255))
|
||||
for y in range(255):
|
||||
if y < steps:
|
||||
val = int(y * increment + start)
|
||||
else:
|
||||
val = 255
|
||||
mask.putpixel((0, y), val)
|
||||
alpha_mask = mask.resize(image.size)
|
||||
# merge the reflection onto our background color using the alpha mask
|
||||
reflection = Image.composite(background, reflection, alpha_mask)
|
||||
# crop the reflection
|
||||
reflection_height = int(image.size[1] * cls.size)
|
||||
reflection = reflection.crop((0, 0, image.size[0], reflection_height))
|
||||
# create new image sized to hold both the original image and the reflection
|
||||
composite = Image.new("RGB", (image.size[0], image.size[1]+reflection_height), background_color)
|
||||
# paste the orignal image and the reflection into the composite image
|
||||
composite.paste(image, (0, 0))
|
||||
composite.paste(reflection, (0, image.size[1]))
|
||||
# return the image complete with reflection effect
|
||||
return composite
|
||||
|
||||
|
||||
class Resize(ImageProcessor):
|
||||
width = None
|
||||
height = None
|
||||
crop = False
|
||||
upscale = False
|
||||
|
||||
@classmethod
|
||||
def process(cls, image, obj=None):
|
||||
cur_width, cur_height = image.size
|
||||
if cls.crop:
|
||||
crop_horz = getattr(obj, obj._ik.crop_horz_field, 1)
|
||||
crop_vert = getattr(obj, obj._ik.crop_vert_field, 1)
|
||||
ratio = max(float(cls.width)/cur_width, float(cls.height)/cur_height)
|
||||
resize_x, resize_y = ((cur_width * ratio), (cur_height * ratio))
|
||||
crop_x, crop_y = (abs(cls.width - resize_x), abs(cls.height - resize_y))
|
||||
x_diff, y_diff = (int(crop_x / 2), int(crop_y / 2))
|
||||
box_left, box_right = {
|
||||
0: (0, cls.width),
|
||||
1: (int(x_diff), int(x_diff + cls.width)),
|
||||
2: (int(crop_x), int(resize_x)),
|
||||
}[crop_horz]
|
||||
box_upper, box_lower = {
|
||||
0: (0, cls.height),
|
||||
1: (int(y_diff), int(y_diff + cls.height)),
|
||||
2: (int(crop_y), int(resize_y)),
|
||||
}[crop_vert]
|
||||
box = (box_left, box_upper, box_right, box_lower)
|
||||
image = image.resize((int(resize_x), int(resize_y)), Image.ANTIALIAS).crop(box)
|
||||
else:
|
||||
if not cls.width is None and not cls.height is None:
|
||||
ratio = min(float(cls.width)/cur_width,
|
||||
float(cls.height)/cur_height)
|
||||
else:
|
||||
if cls.width is None:
|
||||
ratio = float(cls.height)/cur_height
|
||||
else:
|
||||
ratio = float(cls.width)/cur_width
|
||||
new_dimensions = (int(round(cur_width*ratio)),
|
||||
int(round(cur_height*ratio)))
|
||||
if new_dimensions[0] > cur_width or \
|
||||
new_dimensions[1] > cur_height:
|
||||
if not cls.upscale:
|
||||
return image
|
||||
image = image.resize(new_dimensions, Image.ANTIALIAS)
|
||||
return image
|
||||
|
||||
|
||||
class Transpose(ImageProcessor):
|
||||
""" Rotates or flips the image
|
||||
|
||||
Method should be one of the following strings:
|
||||
- FLIP_LEFT RIGHT
|
||||
- FLIP_TOP_BOTTOM
|
||||
- ROTATE_90
|
||||
- ROTATE_270
|
||||
- ROTATE_180
|
||||
|
||||
"""
|
||||
method = 'FLIP_LEFT_RIGHT'
|
||||
|
||||
@classmethod
|
||||
def process(cls, image, obj=None):
|
||||
return image.transpose(getattr(Image, cls.method))
|
||||
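As a sketch of the processor protocol that the module docstring above describes (a hypothetical subclass, not part of this commit):

from imagekit import processors

class Grayscale(processors.ImageProcessor):
    """Convert the image to grayscale; any settings would go in class variables."""
    @classmethod
    def process(cls, image, obj=None):
        return image.convert('L')  # PIL mode 'L' is single-channel 8-bit grayscale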
15 imagekit/utils.py Normal file
@@ -0,0 +1,15 @@
""" ImageKit utility functions """
|
||||
|
||||
import tempfile
|
||||
|
||||
def img_to_fobj(img, format, **kwargs):
|
||||
tmp = tempfile.TemporaryFile()
|
||||
if format != 'JPEG':
|
||||
try:
|
||||
img.save(tmp, format, **kwargs)
|
||||
return
|
||||
except KeyError:
|
||||
pass
|
||||
img.save(tmp, format, **kwargs)
|
||||
tmp.seek(0)
|
||||
return tmp
|
||||
78 localsettings WSL.py Normal file
@@ -0,0 +1,78 @@
import sys
# link localsettings to this file for use on a Windows 10 machine running WSL1
# expofiles on a different drive

DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',  # 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.
        'NAME': 'troggle.sqlite',   # Or path to database file if using sqlite3.
        'USER': 'expo',             # Not used with sqlite3.
        'PASSWORD': 'sekrit',       # Not used with sqlite3.
        'HOST': '',                 # Set to empty string for localhost. Not used with sqlite3.
        'PORT': '',                 # Set to empty string for default. Not used with sqlite3.
    }
}

EXPOUSER = 'expo'
EXPOUSERPASS = 'nnn:ggggggr'
EXPOUSER_EMAIL = 'philip.sargent@gmail.com'

REPOS_ROOT_PATH = '/mnt/d/CUCC-Expo/'

sys.path.append(REPOS_ROOT_PATH)
sys.path.append(REPOS_ROOT_PATH + 'troggle')

PUBLIC_SITE = False

SURVEX_DATA = REPOS_ROOT_PATH + 'loser/'
TUNNEL_DATA = REPOS_ROOT_PATH + 'drawings/'
THREEDCACHEDIR = REPOS_ROOT_PATH + 'expowebcache/3d/'

CAVERN = 'cavern'
THREEDTOPOS = '3dtopos'
EXPOWEB = REPOS_ROOT_PATH + 'expoweb/'
SURVEYS = REPOS_ROOT_PATH
#SURVEY_SCANS = REPOS_ROOT_PATH + 'expofiles/'
SURVEY_SCANS = '/mnt/f/expofiles/'
#FILES = REPOS_ROOT_PATH + 'expofiles'
FILES = '/mnt/f/expofiles'

EXPOWEB_URL = ''
SURVEYS_URL = '/survey_scans/'

PYTHON_PATH = REPOS_ROOT_PATH + 'troggle/'

URL_ROOT = 'http://127.0.0.1:8000/'
#URL_ROOT = "/mnt/d/CUCC-Expo/expoweb/"
DIR_ROOT = ''  # this should end in / if a value is given

#MEDIA_URL = URL_ROOT + DIR_ROOT + '/site_media/'
MEDIA_URL = '/site_media/'
MEDIA_ROOT = REPOS_ROOT_PATH + 'troggle/media/'
MEDIA_ADMIN_DIR = '/usr/lib/python2.7/site-packages/django/contrib/admin/media/'

STATIC_URL = URL_ROOT + 'static/'
STATIC_ROOT = DIR_ROOT + '/mnt/d/CUCC-Expo/'

JSLIB_URL = URL_ROOT + 'javascript/'

TINY_MCE_MEDIA_ROOT = '/usr/share/tinymce/www/'
TINY_MCE_MEDIA_URL = URL_ROOT + DIR_ROOT + '/tinymce_media/'

TEMPLATE_DIRS = (
    PYTHON_PATH + "templates",
    # Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
    # Always use forward slashes, even on Windows.
    # Don't forget to use absolute paths, not relative paths.
)

LOGFILE = PYTHON_PATH + 'troggle.log'
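The comment at the top expects localsettings to be a link to this file; presumably something like the following (note the space in the filename):

ln -s 'localsettings WSL.py' localsettings.py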
@@ -1,5 +1,5 @@
import sys
# link localsettings to this file for use on expo computer in austria
# This is the local settings for use with the docker compose dev setup. It is imported automatically

DATABASES = {
    'default': {
@@ -47,13 +47,13 @@ MEDIA_URL = URL_ROOT + DIR_ROOT + 'site_media/'
MEDIA_ROOT = REPOS_ROOT_PATH + '/troggle/media/'
MEDIA_ADMIN_DIR = '/usr/lib/python2.7/site-packages/django/contrib/admin/media/'

STATIC_URL = URL_ROOT
STATIC_ROOT = DIR_ROOT
STATIC_URL = "/static/"
STATIC_ROOT = "/expo/static"

JSLIB_URL = URL_ROOT + 'javascript/'

TINY_MCE_MEDIA_ROOT = '/usr/share/tinymce/www/'
TINY_MCE_MEDIA_URL = URL_ROOT + DIR_ROOT + '/tinymce_media/'
TINY_MCE_MEDIA_ROOT = STATIC_ROOT + '/tiny_mce/'
TINY_MCE_MEDIA_URL = STATIC_ROOT + '/tiny_mce/'

TEMPLATE_DIRS = (
    PYTHON_PATH + "templates",

@@ -52,8 +52,8 @@ MEDIA_ADMIN_DIR = '/usr/lib/python2.7/site-packages/django/contrib/admin/media/'

JSLIB_URL = URL_ROOT + 'javascript/'

TINY_MCE_MEDIA_ROOT = '/usr/share/tinymce/www/'
TINY_MCE_MEDIA_URL = URL_ROOT + DIR_ROOT + 'tinymce_media/'
TINY_MCE_MEDIA_ROOT = STATIC_ROOT + '/tiny_mce/'
TINY_MCE_MEDIA_URL = STATIC_ROOT + '/tiny_mce/'

TEMPLATE_DIRS = (
    PYTHON_PATH + "templates",

@@ -62,16 +62,12 @@ TEMPLATE_DIRS = (
    # Don't forget to use absolute paths, not relative paths.
)

LOGFILE = '/home/expo/troggle/troggle_log.txt'
LOGFILE = '/home/expo/troggle/troggle.log'

FEINCMS_ADMIN_MEDIA = '/site_media/feincms/'

EMAIL_HOST = "smtp.gmail.com"
EMAIL_HOST_USER = "cuccexpo@gmail.com"
EMAIL_HOST_PASSWORD = "khvtffkhvtff"
EMAIL_PORT = 587
EMAIL_USE_TLS = True
#EMAIL_HOST = "smtp.gmail.com"
#EMAIL_HOST_USER = "cuccexpo@gmail.com"
#EMAIL_HOST_PASSWORD = "khvtffkhvtff"
#EMAIL_PORT=587
#EMAIL_USE_TLS = True
68 logbooksdump.py Normal file
@@ -0,0 +1,68 @@
import os
import time
import timeit
import settings
os.environ['PYTHONPATH'] = settings.PYTHON_PATH
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'settings')
from django.core import management
from django.db import connection, close_old_connections
from django.contrib.auth.models import User
from django.http import HttpResponse
from django.core.urlresolvers import reverse
from troggle.core.models import Cave, Entrance
import troggle.flatpages.models

# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def import_auto_logbooks():
    import parsers.logbooks
    import os
    for pt in troggle.core.models.PersonTrip.objects.all():
        pt.delete()
    for lbe in troggle.core.models.LogbookEntry.objects.all():
        lbe.delete()
    for expedition in troggle.core.models.Expedition.objects.all():
        directory = os.path.join(settings.EXPOWEB,
                                 "years",
                                 expedition.year,
                                 "autologbook")
        for root, dirs, filenames in os.walk(directory):
            for filename in filenames:
                print(os.path.join(root, filename))
                parsers.logbooks.parseAutoLogBookEntry(os.path.join(root, filename))

# Temporary function until definitive source of data transferred.
from django.template.defaultfilters import slugify
from django.template import Context, loader
def dumplogbooks():
    def get_name(pe):
        if pe.nickname:
            return pe.nickname
        else:
            return pe.person.first_name
    for lbe in troggle.core.models.LogbookEntry.objects.all():
        dateStr = lbe.date.strftime("%Y-%m-%d")
        directory = os.path.join(settings.EXPOWEB,
                                 "years",
                                 lbe.expedition.year,
                                 "autologbook")
        if not os.path.isdir(directory):
            os.mkdir(directory)
        filename = os.path.join(directory,
                                dateStr + "." + slugify(lbe.title)[:50] + ".html")
        if lbe.cave:
            print(lbe.cave.reference())
            trip = {"title": lbe.title, "html": lbe.text, "cave": lbe.cave.reference(), "caveOrLocation": "cave"}
        else:
            trip = {"title": lbe.title, "html": lbe.text, "location": lbe.place, "caveOrLocation": "location"}
        pts = [pt for pt in lbe.persontrip_set.all() if pt.personexpedition]
        persons = [{"name": get_name(pt.personexpedition), "TU": pt.time_underground, "author": pt.is_logbook_entry_author} for pt in pts]
        f = open(filename, "wb")
        template = loader.get_template('dataformat/logbookentry.html')
        context = Context({'trip': trip,
                           'persons': persons,
                           'date': dateStr,
                           'expeditionyear': lbe.expedition.year})
        output = template.render(context)
        f.write(unicode(output).encode("utf-8"))
        f.close()
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
0 modelviz.py Executable file → Normal file
@@ -17,19 +17,19 @@ def parseCaveQMs(cave,inputFile):
        try:
            steinBr = Cave.objects.get(official_name="Steinbrückenhöhle")
        except Cave.DoesNotExist:
            print "Steinbruckenhoehle is not in the database. Please run parsers.cavetab first."
            print("Steinbruckenhoehle is not in the database. Please run parsers.cavetab first.")
            return
    elif cave == 'hauch':
        try:
            hauchHl = Cave.objects.get(official_name="Hauchhöhle")
        except Cave.DoesNotExist:
            print "Hauchhoele is not in the database. Please run parsers.cavetab first."
            print("Hauchhoele is not in the database. Please run parsers.cavetab first.")
            return
    elif cave == 'kh':
        try:
            kh = Cave.objects.get(official_name="Kaninchenhöhle")
        except Cave.DoesNotExist:
            print "KH is not in the database. Please run parsers.cavetab first."
            print("KH is not in the database. Please run parsers.cavetab first.")
        parse_KH_QMs(kh, inputFile=inputFile)
        return

@@ -48,7 +48,7 @@ def parseCaveQMs(cave,inputFile):
            elif cave == 'hauch':
                placeholder, hadToCreate = LogbookEntry.objects.get_or_create(date__year=year, title="placeholder for QMs in 234", text="QMs temporarily attached to this should be re-attached to their actual trips", defaults={"date": date(year, 1, 1), "cave": hauchHl})
            if hadToCreate:
                print cave + " placeholder logbook entry for " + str(year) + " added to database"
                print((" - placeholder logbook entry for " + cave + " " + str(year) + " added to database"))
            QMnum = re.match(r".*?-\d*?-X?(?P<numb>\d*)", line[0]).group("numb")
            newQM = QM()
            newQM.found_by = placeholder
@@ -71,19 +71,18 @@ def parseCaveQMs(cave,inputFile):
                if preexistingQM.new_since_parsing == False:  # if the pre-existing QM has not been modified, overwrite it
                    preexistingQM.delete()
                    newQM.save()
                    print "overwriting " + str(preexistingQM) + "\r",
                    #print((" - overwriting " + str(preexistingQM) + "\r"))
                else:  # otherwise, print that it was ignored
                    print "preserving " + str(preexistingQM) + ", which was edited in admin \r",
                    print((" - preserving " + str(preexistingQM) + ", which was edited in admin \r"))

            except QM.DoesNotExist:  # if there is no pre-existing QM, save the new one
                newQM.save()
                print "QM " + str(newQM) + ' added to database\r',
                # print("QM " + str(newQM) + ' added to database\r')

        except KeyError:  # check on this one
            continue
        except IndexError:
            print "Index error in " + str(line)
            print("Index error in " + str(line))
            continue

def parse_KH_QMs(kh, inputFile):
@@ -104,7 +103,7 @@ def parse_KH_QMs(kh, inputFile):
            }
        nonLookupArgs = {
            'grade': res['grade'],
            'nearest_station': res['nearest_station'],
            'nearest_station_name': res['nearest_station'],
            'location_description': res['description']
            }

@@ -115,3 +114,4 @@ parseCaveQMs(cave='stein',inputFile=r"1623/204/qm.csv")
parseCaveQMs(cave='hauch', inputFile=r"1623/234/qm.csv")
parseCaveQMs(cave='kh', inputFile="1623/161/qmtodo.htm")
#parseCaveQMs(cave='balkonhoehle',inputFile=r"1623/264/qm.csv")
51 parsers/caves.py Normal file → Executable file
@@ -6,16 +6,18 @@ import re


def readcaves():
    newArea = models.Area(short_name = "1623", parent = None)
    newArea.save()
    newArea = models.Area(short_name = "1626", parent = None)
    newArea.save()
    print("Reading Entrances")

    # Clear the cave data issues as we are reloading
    models.DataIssue.objects.filter(parser='caves').delete()

    area_1623 = models.Area.objects.update_or_create(short_name = "1623", parent = None)
    area_1626 = models.Area.objects.update_or_create(short_name = "1626", parent = None)
    print(" - Reading Entrances")
    #print "list of <Slug> <Filename>"
    for filename in os.walk(settings.ENTRANCEDESCRIPTIONS).next()[2]:  # Should be a better way of getting a list of files
        if filename.endswith('.html'):
            readentrance(filename)
    print ("Reading Caves")
    print (" - Reading Caves")
    for filename in os.walk(settings.CAVEDESCRIPTIONS).next()[2]:  # Should be a better way of getting a list of files
        if filename.endswith('.html'):
            readcave(filename)
@@ -51,7 +53,7 @@ def readentrance(filename):
    bearings = getXML(entrancecontents, "bearings", maxItems = 1, context = context)
    url = getXML(entrancecontents, "url", maxItems = 1, context = context)
    if len(non_public) == 1 and len(slugs) >= 1 and len(name) >= 1 and len(entrance_description) == 1 and len(explorers) == 1 and len(map_description) == 1 and len(location_description) == 1 and len(approach) == 1 and len(underground_description) == 1 and len(marking) == 1 and len(marking_comment) == 1 and len(findability) == 1 and len(findability_description) == 1 and len(alt) == 1 and len(northing) == 1 and len(easting) == 1 and len(tag_station) == 1 and len(exact_station) == 1 and len(other_station) == 1 and len(other_description) == 1 and len(bearings) == 1 and len(url) == 1:
        e = models.Entrance(name = name[0],
        e, state = models.Entrance.objects.update_or_create(name = name[0],
            non_public = {"True": True, "False": False, "true": True, "false": False,}[non_public[0]],
            entrance_description = entrance_description[0],
            explorers = explorers[0],
@@ -75,14 +77,12 @@ def readentrance(filename):
            url = url[0],
            filename = filename,
            cached_primary_slug = slugs[0])
        e.save()
        primary = True
        for slug in slugs:
            #print slug, filename
            cs = models.EntranceSlug(entrance = e,
            cs = models.EntranceSlug.objects.update_or_create(entrance = e,
                                     slug = slug,
                                     primary = primary)
            cs.save()
            primary = False

def readcave(filename):
@@ -117,7 +117,7 @@ def readcave(filename):
    url = getXML(cavecontents, "url", maxItems = 1, context = context)
    entrances = getXML(cavecontents, "entrance", context = context)
    if len(non_public) == 1 and len(slugs) >= 1 and len(official_name) == 1 and len(areas) >= 1 and len(kataster_code) == 1 and len(kataster_number) == 1 and len(unofficial_number) == 1 and len(explorers) == 1 and len(underground_description) == 1 and len(equipment) == 1 and len(references) == 1 and len(survey) == 1 and len(kataster_status) == 1 and len(underground_centre_line) == 1 and len(notes) == 1 and len(length) == 1 and len(depth) == 1 and len(extent) == 1 and len(survex_file) == 1 and len(description_file ) == 1 and len(url) == 1 and len(entrances) >= 1:
        c = models.Cave(non_public = {"True": True, "False": False, "true": True, "false": False,}[non_public[0]],
        c, state = models.Cave.objects.update_or_create(non_public = {"True": True, "False": False, "true": True, "false": False,}[non_public[0]],
            official_name = official_name[0],
            kataster_code = kataster_code[0],
            kataster_number = kataster_number[0],
@@ -137,7 +137,6 @@ def readcave(filename):
            description_file = description_file[0],
            url = url[0],
            filename = filename)
        c.save()
        for area_slug in areas:
            area = models.Area.objects.filter(short_name = area_slug)
            if area:
@@ -149,12 +148,13 @@ def readcave(filename):
        primary = True
        for slug in slugs:
            try:
                cs = models.CaveSlug(cave = c,
                cs = models.CaveSlug.objects.update_or_create(cave = c,
                                     slug = slug,
                                     primary = primary)
                cs.save()
            except:
                print("Can't find text (slug): %s, skipping %s" % (slug, context))
                message = " ! Can't find text (slug): %s, skipping %s" % (slug, context)
                models.DataIssue.objects.create(parser='caves', message=message)
                print(message)

            primary = False
        for entrance in entrances:
@@ -162,20 +162,27 @@ def readcave(filename):
            letter = getXML(entrance, "letter", maxItems = 1, context = context)[0]
            try:
                entrance = models.Entrance.objects.get(entranceslug__slug = slug)
                ce = models.CaveAndEntrance(cave = c, entrance_letter = letter, entrance = entrance)
                ce.save()
                ce = models.CaveAndEntrance.objects.update_or_create(cave = c, entrance_letter = letter, entrance = entrance)
            except:
                print ("Entrance text (slug) %s missing %s" % (slug, context))
                message = " ! Entrance text (slug) %s missing %s" % (slug, context)
                models.DataIssue.objects.create(parser='caves', message=message)
                print(message)


def getXML(text, itemname, minItems = 1, maxItems = None, printwarnings = True, context = ""):
    # this next line is where it crashes horribly if a stray umlaut creeps in. Will fix itself in python3
    items = re.findall("<%(itemname)s>(.*?)</%(itemname)s>" % {"itemname": itemname}, text, re.S)
    if len(items) < minItems and printwarnings:
        print("%(count)i %(itemname)s found, at least %(min)i expected" % {"count": len(items),
        message = " ! %(count)i %(itemname)s found, at least %(min)i expected" % {"count": len(items),
                  "itemname": itemname,
                  "min": minItems} + context)
                  "min": minItems} + context
        models.DataIssue.objects.create(parser='caves', message=message)
        print(message)

    if maxItems is not None and len(items) > maxItems and printwarnings:
        print("%(count)i %(itemname)s found, no more than %(max)i expected" % {"count": len(items),
        message = " ! %(count)i %(itemname)s found, no more than %(max)i expected" % {"count": len(items),
                  "itemname": itemname,
                  "max": maxItems} + context)
                  "max": maxItems} + context
        models.DataIssue.objects.create(parser='caves', message=message)
        print(message)
    return items
@@ -1,18 +1,20 @@
|
||||
#.-*- coding: utf-8 -*-
|
||||
|
||||
from django.conf import settings
|
||||
import troggle.core.models as models
|
||||
|
||||
from parsers.people import GetPersonExpeditionNameLookup
|
||||
from parsers.cavetab import GetCaveLookup
|
||||
|
||||
from django.template.defaultfilters import slugify
|
||||
|
||||
from __future__ import (absolute_import, division,
|
||||
print_function)
|
||||
import csv
|
||||
import re
|
||||
import datetime
|
||||
import datetime, time
|
||||
import os
|
||||
import pickle
|
||||
|
||||
from django.conf import settings
|
||||
from django.template.defaultfilters import slugify
|
||||
|
||||
|
||||
from troggle.core.models import DataIssue, Expedition
|
||||
import troggle.core.models as models
|
||||
from parsers.people import GetPersonExpeditionNameLookup
|
||||
from parsers.cavetab import GetCaveLookup
|
||||
from utils import save_carefully
|
||||
|
||||
#
|
||||
@@ -23,19 +25,23 @@ from utils import save_carefully
|
||||
#
|
||||
# the logbook loading section
|
||||
#
|
||||
def GetTripPersons(trippeople, expedition, logtime_underground):
|
||||
def GetTripPersons(trippeople, expedition, logtime_underground):
|
||||
res = [ ]
|
||||
author = None
|
||||
for tripperson in re.split(",|\+|&|&(?!\w+;)| and ", trippeople):
|
||||
round_bracket_regex = re.compile(r"[\(\[].*?[\)\]]")
|
||||
for tripperson in re.split(r",|\+|&|&(?!\w+;)| and ", trippeople):
|
||||
tripperson = tripperson.strip()
|
||||
mul = re.match("<u>(.*?)</u>$(?i)", tripperson)
|
||||
mul = re.match(r"<u>(.*?)</u>$(?i)", tripperson)
|
||||
if mul:
|
||||
tripperson = mul.group(1).strip()
|
||||
if tripperson and tripperson[0] != '*':
|
||||
#assert tripperson in personyearmap, "'%s' << %s\n\n %s" % (tripperson, trippeople, personyearmap)
|
||||
tripperson = re.sub(round_bracket_regex, "", tripperson).strip()
|
||||
personyear = GetPersonExpeditionNameLookup(expedition).get(tripperson.lower())
|
||||
if not personyear:
|
||||
print "NoMatchFor: '%s'" % tripperson
|
||||
print(" - No name match for: '%s'" % tripperson)
|
||||
message = "No name match for: '%s' in year '%s'" % (tripperson, expedition.year)
|
||||
models.DataIssue.objects.create(parser='logbooks', message=message)
|
||||
res.append((personyear, logtime_underground))
|
||||
if mul:
|
||||
author = personyear
|
||||
@@ -45,7 +51,7 @@ def GetTripPersons(trippeople, expedition, logtime_underground):
|
||||
author = res[-1][0]
|
||||
return res, author
|
||||
|
||||
def GetTripCave(place): #need to be fuzzier about matching here. Already a very slow function...
|
||||
def GetTripCave(place): #need to be fuzzier about matching here. Already a very slow function...
|
||||
# print "Getting cave for " , place
|
||||
try:
|
||||
katastNumRes=[]
|
||||
@@ -65,45 +71,53 @@ def GetTripCave(place): #need to be fuzzier about matching h
|
||||
return tripCaveRes
|
||||
|
||||
elif len(tripCaveRes)>1:
|
||||
print "Ambiguous place " + str(place) + " entered. Choose from " + str(tripCaveRes)
|
||||
print("Ambiguous place " + str(place) + " entered. Choose from " + str(tripCaveRes))
|
||||
correctIndex=input("type list index of correct cave")
|
||||
return tripCaveRes[correctIndex]
|
||||
else:
|
||||
print "No cave found for place " , place
|
||||
print("No cave found for place " , place)
|
||||
return
|
||||
|
||||
|
||||
logentries = [] # the entire logbook is a single object: a list of entries
|
||||
noncaveplaces = [ "Journey", "Loser Plateau" ]
|
||||
def EnterLogIntoDbase(date, place, title, text, trippeople, expedition, logtime_underground):
|
||||
|
||||
def EnterLogIntoDbase(date, place, title, text, trippeople, expedition, logtime_underground, entry_type="wiki"):
|
||||
""" saves a logbook entry and related persontrips """
|
||||
global logentries
|
||||
|
||||
entrytuple = (date, place, title, text,
|
||||
trippeople, expedition, logtime_underground, entry_type)
|
||||
logentries.append(entrytuple)
|
||||
|
||||
trippersons, author = GetTripPersons(trippeople, expedition, logtime_underground)
|
||||
if not author:
|
||||
print "skipping logentry", title
|
||||
print(" * Skipping logentry: " + title + " - no author for entry")
|
||||
message = "Skipping logentry: %s - no author for entry in year '%s'" % (title, expedition.year)
|
||||
models.DataIssue.objects.create(parser='logbooks', message=message)
|
||||
return
|
||||
|
||||
# tripCave = GetTripCave(place)
|
||||
#
|
||||
|
||||
#tripCave = GetTripCave(place)
|
||||
|
||||
lplace = place.lower()
|
||||
if lplace not in noncaveplaces:
|
||||
cave=GetCaveLookup().get(lplace)
|
||||
|
||||
#Check for an existing copy of the current entry, and save
|
||||
expeditionday = expedition.get_expedition_day(date)
|
||||
lookupAttribs={'date':date, 'title':title}
|
||||
nonLookupAttribs={'place':place, 'text':text, 'expedition':expedition, 'cave':cave, 'slug':slugify(title)[:50]}
|
||||
lookupAttribs={'date':date, 'title':title}
|
||||
nonLookupAttribs={'place':place, 'text':text, 'expedition':expedition, 'cave':cave, 'slug':slugify(title)[:50], 'entry_type':entry_type}
|
||||
lbo, created=save_carefully(models.LogbookEntry, lookupAttribs, nonLookupAttribs)
|
||||
|
||||
|
||||
for tripperson, time_underground in trippersons:
|
||||
lookupAttribs={'personexpedition':tripperson, 'logbook_entry':lbo}
|
||||
nonLookupAttribs={'time_underground':time_underground, 'is_logbook_entry_author':(tripperson == author)}
|
||||
#print nonLookupAttribs
|
||||
save_carefully(models.PersonTrip, lookupAttribs, nonLookupAttribs)
|
||||
|
||||
|
||||
def ParseDate(tripdate, year):
|
||||
""" Interprets dates in the expo logbooks and returns a correct datetime.date object """
|
||||
mdatestandard = re.match("(\d\d\d\d)-(\d\d)-(\d\d)", tripdate)
|
||||
mdategoof = re.match("(\d\d?)/0?(\d)/(20|19)?(\d\d)", tripdate)
|
||||
mdatestandard = re.match(r"(\d\d\d\d)-(\d\d)-(\d\d)", tripdate)
|
||||
mdategoof = re.match(r"(\d\d?)/0?(\d)/(20|19)?(\d\d)", tripdate)
|
||||
if mdatestandard:
|
||||
assert mdatestandard.group(1) == year, (tripdate, year)
|
||||
year, month, day = int(mdatestandard.group(1)), int(mdatestandard.group(2)), int(mdatestandard.group(3))
|
||||
@@ -115,37 +129,35 @@ def ParseDate(tripdate, year):
|
||||
assert False, tripdate
|
||||
return datetime.date(year, month, day)
|
||||
|
||||
# 2007, 2008, 2006
|
||||
# 2006, 2008 - 2009
|
||||
def Parselogwikitxt(year, expedition, txt):
|
||||
trippara = re.findall("===(.*?)===([\s\S]*?)(?====)", txt)
|
||||
trippara = re.findall(r"===(.*?)===([\s\S]*?)(?====)", txt)
|
||||
for triphead, triptext in trippara:
|
||||
tripheadp = triphead.split("|")
|
||||
#print "ttt", tripheadp
|
||||
assert len(tripheadp) == 3, (tripheadp, triptext)
|
||||
tripdate, tripplace, trippeople = tripheadp
|
||||
tripsplace = tripplace.split(" - ")
|
||||
tripcave = tripsplace[0].strip()
|
||||
|
||||
tul = re.findall("T/?U:?\s*(\d+(?:\.\d*)?|unknown)\s*(hrs|hours)?", triptext)
|
||||
tul = re.findall(r"T/?U:?\s*(\d+(?:\.\d*)?|unknown)\s*(hrs|hours)?", triptext)
|
||||
if tul:
|
||||
#assert len(tul) <= 1, (triphead, triptext)
|
||||
#assert tul[0][1] in ["hrs", "hours"], (triphead, triptext)
|
||||
tu = tul[0][0]
|
||||
else:
|
||||
tu = ""
|
||||
#assert tripcave == "Journey", (triphead, triptext)
|
||||
|
||||
#print tripdate
|
||||
ldate = ParseDate(tripdate.strip(), year)
|
||||
#print "\n", tripcave, "--- ppp", trippeople, len(triptext)
|
||||
EnterLogIntoDbase(date = ldate, place = tripcave, title = tripplace, text = triptext, trippeople=trippeople, expedition=expedition, logtime_underground=0)
|
||||
|
||||
# 2002, 2004, 2005
|
||||
# 2002, 2004, 2005, 2007, 2010 - now
|
||||
def Parseloghtmltxt(year, expedition, txt):
|
||||
tripparas = re.findall("<hr\s*/>([\s\S]*?)(?=<hr)", txt)
|
||||
#print(" - Starting log html parser")
|
||||
tripparas = re.findall(r"<hr\s*/>([\s\S]*?)(?=<hr)", txt)
|
||||
logbook_entry_count = 0
|
||||
for trippara in tripparas:
|
||||
#print(" - HR detected - maybe a trip?")
|
||||
logbook_entry_count += 1
|
||||
|
||||
s = re.match('''(?x)(?:\s*<div\sclass="tripdate"\sid=".*?">.*?</div>\s*<p>)? # second date
|
||||
s = re.match(r'''(?x)(?:\s*<div\sclass="tripdate"\sid=".*?">.*?</div>\s*<p>)? # second date
|
||||
\s*(?:<a\s+id="(.*?)"\s*/>\s*</a>)?
|
||||
\s*<div\s+class="tripdate"\s*(?:id="(.*?)")?>(.*?)</div>(?:<p>)?
|
||||
\s*<div\s+class="trippeople">\s*(.*?)</div>
|
||||
@@ -155,46 +167,39 @@ def Parseloghtmltxt(year, expedition, txt):
|
||||
\s*$
|
||||
''', trippara)
|
||||
if not s:
|
||||
if not re.search("Rigging Guide", trippara):
|
||||
print "can't parse: ", trippara # this is 2007 which needs editing
|
||||
#assert s, trippara
|
||||
if not re.search(r"Rigging Guide", trippara):
|
||||
print(("can't parse: ", trippara)) # this is 2007 which needs editing
|
||||
continue
|
||||
|
||||
tripid, tripid1, tripdate, trippeople, triptitle, triptext, tu = s.groups()
|
||||
ldate = ParseDate(tripdate.strip(), year)
|
||||
#assert tripid[:-1] == "t" + tripdate, (tripid, tripdate)
|
||||
trippeople = re.sub("Ol(?!l)", "Olly", trippeople)
|
||||
trippeople = re.sub("Wook(?!e)", "Wookey", trippeople)
|
||||
triptitles = triptitle.split(" - ")
|
||||
if len(triptitles) >= 2:
|
||||
tripcave = triptitles[0]
|
||||
else:
|
||||
tripcave = "UNKNOWN"
|
||||
#print "\n", tripcave, "--- ppp", trippeople, len(triptext)
|
||||
ltriptext = re.sub("</p>", "", triptext)
|
||||
ltriptext = re.sub("\s*?\n\s*", " ", ltriptext)
|
||||
ltriptext = re.sub("<p>", "\n\n", ltriptext).strip()
|
||||
EnterLogIntoDbase(date = ldate, place = tripcave, title = triptitle, text = ltriptext, trippeople=trippeople, expedition=expedition, logtime_underground=0)
|
||||
ltriptext = re.sub(r"</p>", "", triptext)
|
||||
ltriptext = re.sub(r"\s*?\n\s*", " ", ltriptext)
|
||||
ltriptext = re.sub(r"<p>", "</br></br>", ltriptext).strip()
|
||||
EnterLogIntoDbase(date = ldate, place = tripcave, title = triptitle, text = ltriptext,
|
||||
trippeople=trippeople, expedition=expedition, logtime_underground=0,
|
||||
entry_type="html")

# main parser for pre-2001. simpler because the data has been hacked so much to fit it
# main parser for 1991 - 2001. simpler because the data has been hacked so much to fit it
def Parseloghtml01(year, expedition, txt):
    tripparas = re.findall("<hr[\s/]*>([\s\S]*?)(?=<hr)", txt)
    tripparas = re.findall(r"<hr[\s/]*>([\s\S]*?)(?=<hr)", txt)
    for trippara in tripparas:
        s = re.match(u"(?s)\s*(?:<p>)?(.*?)</?p>(.*)$(?i)", trippara)
        assert s, trippara[:300]
        tripheader, triptext = s.group(1), s.group(2)
        mtripid = re.search('<a id="(.*?)"', tripheader)
        mtripid = re.search(r'<a id="(.*?)"', tripheader)
        tripid = mtripid and mtripid.group(1) or ""
        tripheader = re.sub("</?(?:[ab]|span)[^>]*>", "", tripheader)

        #print " ", [tripheader]
        #continue
        tripheader = re.sub(r"</?(?:[ab]|span)[^>]*>", "", tripheader)

        tripdate, triptitle, trippeople = tripheader.split("|")
        ldate = ParseDate(tripdate.strip(), year)

        mtu = re.search('<p[^>]*>(T/?U.*)', triptext)
        mtu = re.search(r'<p[^>]*>(T/?U.*)', triptext)
        if mtu:
            tu = mtu.group(1)
            triptext = triptext[:mtu.start(0)] + triptext[mtu.end():]
@@ -206,38 +211,35 @@ def Parseloghtml01(year, expedition, txt):

        ltriptext = triptext

        mtail = re.search('(?:<a href="[^"]*">[^<]*</a>|\s|/|-|&|</?p>|\((?:same day|\d+)\))*$', ltriptext)
        mtail = re.search(r'(?:<a href="[^"]*">[^<]*</a>|\s|/|-|&|</?p>|\((?:same day|\d+)\))*$', ltriptext)
        if mtail:
            #print mtail.group(0)
            ltriptext = ltriptext[:mtail.start(0)]
        ltriptext = re.sub("</p>", "", ltriptext)
        ltriptext = re.sub("\s*?\n\s*", " ", ltriptext)
        ltriptext = re.sub("<p>|<br>", "\n\n", ltriptext).strip()
        #ltriptext = re.sub("[^\s0-9a-zA-Z\-.,:;'!]", "NONASCII", ltriptext)
        ltriptext = re.sub("</?u>", "_", ltriptext)
        ltriptext = re.sub("</?i>", "''", ltriptext)
        ltriptext = re.sub("</?b>", "'''", ltriptext)

        #print ldate, trippeople.strip()
        # could include the tripid (url link for cross referencing)
        EnterLogIntoDbase(date=ldate, place=tripcave, title=triptitle, text=ltriptext, trippeople=trippeople, expedition=expedition, logtime_underground=0)
        ltriptext = re.sub(r"</p>", "", ltriptext)
        ltriptext = re.sub(r"\s*?\n\s*", " ", ltriptext)
        ltriptext = re.sub(r"<p>|<br>", "\n\n", ltriptext).strip()
        ltriptext = re.sub(r"</?u>", "_", ltriptext)
        ltriptext = re.sub(r"</?i>", "''", ltriptext)
        ltriptext = re.sub(r"</?b>", "'''", ltriptext)

        EnterLogIntoDbase(date=ldate, place=tripcave, title=triptitle, text=ltriptext,
                          trippeople=trippeople, expedition=expedition, logtime_underground=0,
                          entry_type="html")

# parser for 2003
def Parseloghtml03(year, expedition, txt):
    tripparas = re.findall("<hr\s*/>([\s\S]*?)(?=<hr)", txt)
    tripparas = re.findall(r"<hr\s*/>([\s\S]*?)(?=<hr)", txt)
    for trippara in tripparas:
        s = re.match(u"(?s)\s*<p>(.*?)</p>(.*)$", trippara)
        assert s, trippara
        tripheader, triptext = s.group(1), s.group(2)
        tripheader = re.sub("&nbsp;", " ", tripheader)
        tripheader = re.sub("\s+", " ", tripheader).strip()
        tripheader = re.sub(r"&nbsp;", " ", tripheader)
        tripheader = re.sub(r"\s+", " ", tripheader).strip()
        sheader = tripheader.split(" -- ")
        tu = ""
        if re.match("T/U|Time underwater", sheader[-1]):
            tu = sheader.pop()
        if len(sheader) != 3:
            print "header not three pieces", sheader
            print("header not three pieces", sheader)
        tripdate, triptitle, trippeople = sheader
        ldate = ParseDate(tripdate.strip(), year)
        triptitles = triptitle.split(" , ")
@@ -245,38 +247,14 @@ def Parseloghtml03(year, expedition, txt):
            tripcave = triptitles[0]
        else:
            tripcave = "UNKNOWN"
        #print tripcave, "--- ppp", triptitle, trippeople, len(triptext)
        ltriptext = re.sub("</p>", "", triptext)
        ltriptext = re.sub("\s*?\n\s*", " ", ltriptext)
        ltriptext = re.sub("<p>", "\n\n", ltriptext).strip()
        ltriptext = re.sub("[^\s0-9a-zA-Z\-.,:;'!&()\[\]<>?=+*%]", "_NONASCII_", ltriptext)
        EnterLogIntoDbase(date = ldate, place = tripcave, title = triptitle, text = ltriptext, trippeople=trippeople, expedition=expedition, logtime_underground=0)
        ltriptext = re.sub(r"</p>", "", triptext)
        ltriptext = re.sub(r"\s*?\n\s*", " ", ltriptext)
        ltriptext = re.sub(r"<p>", "\n\n", ltriptext).strip()
        ltriptext = re.sub(r"[^\s0-9a-zA-Z\-.,:;'!&()\[\]<>?=+*%]", "_NONASCII_", ltriptext)
        EnterLogIntoDbase(date = ldate, place = tripcave, title = triptitle,
                          text = ltriptext, trippeople=trippeople, expedition=expedition,
                          logtime_underground=0, entry_type="html")

yearlinks = [
#    ("2013", "2013/logbook.html", Parseloghtmltxt),
    ("2012", "2012/logbook.html", Parseloghtmltxt),
    ("2011", "2011/logbook.html", Parseloghtmltxt),
    ("2010", "2010/logbook.html", Parselogwikitxt),
    ("2009", "2009/2009logbook.txt", Parselogwikitxt),
    ("2008", "2008/2008logbook.txt", Parselogwikitxt),
    ("2007", "2007/logbook.html", Parseloghtmltxt),
    ("2006", "2006/logbook/logbook_06.txt", Parselogwikitxt),
    ("2005", "2005/logbook.html", Parseloghtmltxt),
    ("2004", "2004/logbook.html", Parseloghtmltxt),
    ("2003", "2003/logbook.html", Parseloghtml03),
    ("2002", "2002/logbook.html", Parseloghtmltxt),
    ("2001", "2001/log.htm", Parseloghtml01),
    ("2000", "2000/log.htm", Parseloghtml01),
    ("1999", "1999/log.htm", Parseloghtml01),
    ("1998", "1998/log.htm", Parseloghtml01),
    ("1997", "1997/log.htm", Parseloghtml01),
    ("1996", "1996/log.htm", Parseloghtml01),
    ("1995", "1995/log.htm", Parseloghtml01),
    ("1994", "1994/log.htm", Parseloghtml01),
    ("1993", "1993/log.htm", Parseloghtml01),
    ("1992", "1992/log.htm", Parseloghtml01),
    ("1991", "1991/log.htm", Parseloghtml01),
]

def SetDatesFromLogbookEntries(expedition):
    """
@@ -295,54 +273,109 @@ def SetDatesFromLogbookEntries(expedition):
            persontrip.persontrip_next = None
            lprevpersontrip = persontrip
            persontrip.save()


def LoadLogbookForExpedition(expedition):
    """ Parses all logbook entries for one expedition """

    expowebbase = os.path.join(settings.EXPOWEB, "years")
    year = str(expedition.year)
    for lyear, lloc, parsefunc in yearlinks:
        if lyear == year:
            break
    fin = open(os.path.join(expowebbase, lloc))
    print "opening", lloc
    txt = fin.read().decode("latin1")
    fin.close()
    parsefunc(year, expedition, txt)
    SetDatesFromLogbookEntries(expedition)
    return "TOLOAD: " + year + " " + str(expedition.personexpedition_set.all()[1].logbookentry_set.count()) + " " + str(models.PersonTrip.objects.filter(personexpedition__expedition=expedition).count())


def LoadLogbookForExpedition(expedition):
    """ Parses all logbook entries for one expedition
    """
    global logentries
    logbook_parseable = False
    logbook_cached = False
    yearlinks = settings.LOGBOOK_PARSER_SETTINGS
    expologbase = os.path.join(settings.EXPOWEB, "years")

    if expedition.year in yearlinks:
        logbookfile = os.path.join(expologbase, yearlinks[expedition.year][0])
        parsefunc = yearlinks[expedition.year][1]
    else:
        logbookfile = os.path.join(expologbase, expedition.year, settings.DEFAULT_LOGBOOK_FILE)
        parsefunc = settings.DEFAULT_LOGBOOK_PARSER
    cache_filename = logbookfile + ".cache"

    try:
        bad_cache = False
        now = time.time()
        cache_t = os.path.getmtime(cache_filename)
        if os.path.getmtime(logbookfile) - cache_t > 2: # at least 2 secs later
            bad_cache = True
        if now - cache_t > 30*24*60*60:
            bad_cache = True
        if bad_cache:
            print(" - ! Cache is either stale or more than 30 days old. Deleting it.")
            os.remove(cache_filename)
            logentries = []
            print(" ! Removed stale or corrupt cache file")
            raise
        print(" - Reading cache: " + cache_filename, end='')
        try:
            with open(cache_filename, "rb") as f:
                logentries = pickle.load(f)
            print(" -- Loaded ", len(logentries), " log entries")
            logbook_cached = True
        except:
            print("\n ! Failed to load corrupt cache. Deleting it.\n")
            os.remove(cache_filename)
            logentries = []
            raise
    except: # no cache found
        #print(" - No cache \"" + cache_filename + "\"")
        try:
            file_in = open(logbookfile, 'rb')
            txt = file_in.read().decode("latin1")
            file_in.close()
            logbook_parseable = True
            print((" - Using: " + parsefunc + " to parse " + logbookfile))
        except (IOError):
            logbook_parseable = False
            print((" ! Couldn't open logbook " + logbookfile))

    if logbook_parseable:
        parser = globals()[parsefunc]
        parser(expedition.year, expedition, txt)
        SetDatesFromLogbookEntries(expedition)
        # and this has also stored all the log entries in logentries[]
        if len(logentries) > 0:
            print(" - Caching ", len(logentries), " log entries")
            with open(cache_filename, "wb") as fc:
                pickle.dump(logentries, fc, 2)
        else:
            print(" ! NO TRIP entries found in logbook, check the syntax.")

        logentries = [] # flush for next year

    if logbook_cached:
        i = 0
        for entrytuple in range(len(logentries)):
            date, place, title, text, trippeople, expedition, logtime_underground, \
                entry_type = logentries[i]
            EnterLogIntoDbase(date, place, title, text, trippeople, expedition, logtime_underground,\
                entry_type)
            i += 1

    #return "TOLOAD: " + year + " " + str(expedition.personexpedition_set.all()[1].logbookentry_set.count()) + " " + str(models.PersonTrip.objects.filter(personexpedition__expedition=expedition).count())

def LoadLogbooks():
    """ This is the master function for parsing all logbooks into the Troggle database. Requires yearlinks, which is a list of tuples for each expedition with expedition year, logbook path, and parsing function. """

    #Deletion has been moved to a separate function to enable the non-destructive importing
    #models.LogbookEntry.objects.all().delete()
    expowebbase = os.path.join(settings.EXPOWEB, "years")
    #yearlinks = [ ("2001", "2001/log.htm", Parseloghtml01), ] #overwrite
    #yearlinks = [ ("1996", "1996/log.htm", Parseloghtml01),] # overwrite
    """ This is the master function for parsing all logbooks into the Troggle database.
    """
    DataIssue.objects.filter(parser='logbooks').delete()
    expos = Expedition.objects.all()
    nologbook = ["1976", "1977", "1978", "1979", "1980", "1980", "1981", "1983", "1984",
                 "1985", "1986", "1987", "1988", "1989", "1990",]
    for expo in expos:
        if expo.year not in nologbook:
            print((" - Logbook for: " + expo.year))
            LoadLogbookForExpedition(expo)

    for year, lloc, parsefunc in yearlinks:
        # This will not work until the corresponding year exists in the database.
        # In 2012 this needed noscript/folk.csv to be updated first.
        expedition = models.Expedition.objects.filter(year = year)[0]
        fin = open(os.path.join(expowebbase, lloc))
        txt = fin.read().decode("latin1")
        fin.close()
        parsefunc(year, expedition, txt)
        SetDatesFromLogbookEntries(expedition)

dateRegex = re.compile('<span\s+class="date">(\d\d\d\d)-(\d\d)-(\d\d)</span>', re.S)
expeditionYearRegex = re.compile('<span\s+class="expeditionyear">(.*?)</span>', re.S)
titleRegex = re.compile('<H1>(.*?)</H1>', re.S)
reportRegex = re.compile('<div\s+class="report">(.*)</div>\s*</body>', re.S)
personRegex = re.compile('<div\s+class="person">(.*?)</div>', re.S)
nameAuthorRegex = re.compile('<span\s+class="name(,author|)">(.*?)</span>', re.S)
TURegex = re.compile('<span\s+class="TU">([0-9]*\.?[0-9]+)</span>', re.S)
locationRegex = re.compile('<span\s+class="location">(.*?)</span>', re.S)
caveRegex = re.compile('<span\s+class="cave">(.*?)</span>', re.S)
dateRegex = re.compile(r'<span\s+class="date">(\d\d\d\d)-(\d\d)-(\d\d)</span>', re.S)
expeditionYearRegex = re.compile(r'<span\s+class="expeditionyear">(.*?)</span>', re.S)
titleRegex = re.compile(r'<H1>(.*?)</H1>', re.S)
reportRegex = re.compile(r'<div\s+class="report">(.*)</div>\s*</body>', re.S)
personRegex = re.compile(r'<div\s+class="person">(.*?)</div>', re.S)
nameAuthorRegex = re.compile(r'<span\s+class="name(,author|)">(.*?)</span>', re.S)
TURegex = re.compile(r'<span\s+class="TU">([0-9]*\.?[0-9]+)</span>', re.S)
locationRegex = re.compile(r'<span\s+class="location">(.*?)</span>', re.S)
caveRegex = re.compile(r'<span\s+class="cave">(.*?)</span>', re.S)
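# Illustrative aside (hypothetical snippet): given page contents containing
#   <span class="date">2013-08-04</span> ... <span class="TU">5.5</span>
# dateRegex.search(contents).groups() returns ('2013', '08', '04') and
# TURegex.search(contents).group(1) returns '5.5'.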

def parseAutoLogBookEntry(filename):
    errors = []
@@ -355,25 +388,25 @@ def parseAutoLogBookEntry(filename):
        year, month, day = [int(x) for x in dateMatch.groups()]
        date = datetime.date(year, month, day)
    else:
        errors.append("Date could not be found")
        errors.append(" - Date could not be found")

    expeditionYearMatch = expeditionYearRegex.search(contents)
    if expeditionYearMatch:
        try:
            expedition = models.Expedition.objects.get(year = expeditionYearMatch.groups()[0])
            personExpeditionNameLookup = GetPersonExpeditionNameLookup(expedition)
        except models.Expedition.DoesNotExist:
            errors.append("Expedition not in database")
        except Expedition.DoesNotExist:
            errors.append(" - Expedition not in database")
    else:
        errors.append("Expediton Year could not be parsed")
        errors.append(" - Expedition Year could not be parsed")

    titleMatch = titleRegex.search(contents)
    if titleMatch:
        title, = titleMatch.groups()
        if len(title) > settings.MAX_LOGBOOK_ENTRY_TITLE_LENGTH:
            errors.append("Title too long")
            errors.append(" - Title too long")
    else:
        errors.append("Title could not be found")
        errors.append(" - Title could not be found")

    caveMatch = caveRegex.search(contents)
    if caveMatch:
@@ -382,7 +415,7 @@ def parseAutoLogBookEntry(filename):
            cave = models.getCaveByReference(caveRef)
        except AssertionError:
            cave = None
            errors.append("Cave not found in database")
            errors.append(" - Cave not found in database")
    else:
        cave = None

@@ -393,13 +426,13 @@ def parseAutoLogBookEntry(filename):
        location = None

    if cave is None and location is None:
        errors.append("Location nor cave could not be found")
        errors.append(" - Location nor cave could not be found")

    reportMatch = reportRegex.search(contents)
    if reportMatch:
        report, = reportMatch.groups()
    else:
        errors.append("Contents could not be found")
        errors.append(" - Contents could not be found")
    if errors:
        return errors # Easiest to bail out at this point as we need to make sure that we know which expedition to look for people from.
    people = []
@@ -410,21 +443,21 @@ def parseAutoLogBookEntry(filename):
            if name.lower() in personExpeditionNameLookup:
                personExpo = personExpeditionNameLookup[name.lower()]
            else:
                errors.append("Person could not be found in database")
                errors.append(" - Person could not be found in database")
            author = bool(author)
        else:
            errors.append("Persons name could not be found")
            errors.append(" - Persons name could not be found")

        TUMatch = TURegex.search(contents)
        if TUMatch:
            TU, = TUMatch.groups()
        else:
            errors.append("TU could not be found")
            errors.append(" - TU could not be found")
        if not errors:
            people.append((name, author, TU))
    if errors:
        return errors # Bail out before commiting to the database
    logbookEntry = models.LogbookEntry(date = date,
        return errors # Bail out before committing to the database
    logbookEntry = LogbookEntry(date = date,
                                expedition = expedition,
                                title = title, cave = cave, place = location,
                                text = report, slug = slugify(title)[:50],
@@ -435,4 +468,4 @@ def parseAutoLogBookEntry(filename):
                          time_underground = TU,
                          logbook_entry = logbookEntry,
                          is_logbook_entry_author = author).save()
    print logbookEntry
    print(logbookEntry)

@@ -4,53 +4,62 @@ from django.conf import settings
import troggle.core.models as models
import csv, re, datetime, os, shutil
from utils import save_carefully
from HTMLParser import HTMLParser
from unidecode import unidecode

def saveMugShot(mugShotPath, mugShotFilename, person):
    if mugShotFilename.startswith(r'i/'): #if filename in cell has the directory attached (I think they all do), remove it
        mugShotFilename = mugShotFilename[2:]
    else:
        mugShotFilename = mugShotFilename # just in case one doesn't
# def saveMugShot(mugShotPath, mugShotFilename, person):
#     if mugShotFilename.startswith(r'i/'): #if filename in cell has the directory attached (I think they all do), remove it
#         mugShotFilename = mugShotFilename[2:]
#     else:
#         mugShotFilename = mugShotFilename # just in case one doesn't

    dummyObj = models.DPhoto(file=mugShotFilename)
#     dummyObj = models.DPhoto(file=mugShotFilename)

    #Put a copy of the file in the right place. mugShotObj.file.path is determined by the django filesystemstorage specified in models.py
    if not os.path.exists(dummyObj.file.path):
        shutil.copy(mugShotPath, dummyObj.file.path)
#     #Put a copy of the file in the right place. mugShotObj.file.path is determined by the django filesystemstorage specified in models.py
#     if not os.path.exists(dummyObj.file.path):
#         shutil.copy(mugShotPath, dummyObj.file.path)

    mugShotObj, created = save_carefully(
        models.DPhoto,
        lookupAttribs={'is_mugshot':True, 'file':mugShotFilename},
        nonLookupAttribs={'caption':"Mugshot for "+person.first_name+" "+person.last_name}
    )
#     mugShotObj, created = save_carefully(
#         models.DPhoto,
#         lookupAttribs={'is_mugshot':True, 'file':mugShotFilename},
#         nonLookupAttribs={'caption':"Mugshot for "+person.first_name+" "+person.last_name}
#     )

    if created:
        mugShotObj.contains_person.add(person)
        mugShotObj.save()
#     if created:
#         mugShotObj.contains_person.add(person)
#         mugShotObj.save()

def parseMugShotAndBlurb(personline, header, person):
    """create mugshot Photo instance"""
    mugShotFilename = personline[header["Mugshot"]]
    mugShotPath = os.path.join(settings.EXPOWEB, "folk", mugShotFilename)
    if mugShotPath[-3:] == 'jpg': #if person just has an image, add it
        saveMugShot(mugShotPath=mugShotPath, mugShotFilename=mugShotFilename, person=person)
        #saveMugShot(mugShotPath=mugShotPath, mugShotFilename=mugShotFilename, person=person)
        pass
    elif mugShotPath[-3:] == 'htm': #if person has an html page, find the image(s) and add it. Also, add the text from the html page to the "blurb" field in his model instance.
        personPageOld = open(mugShotPath,'r').read()
        if not person.blurb:
            person.blurb = re.search('<body>.*<hr', personPageOld, re.DOTALL).group() #this needs to be refined, take care of the HTML and make sure it doesn't match beyond the blurb
        for mugShotFilename in re.findall('i/.*?jpg', personPageOld, re.DOTALL):
            mugShotPath = os.path.join(settings.EXPOWEB, "folk", mugShotFilename)
            saveMugShot(mugShotPath=mugShotPath, mugShotFilename=mugShotFilename, person=person)
            pblurb = re.search('<body>.*<hr', personPageOld, re.DOTALL)
            if pblurb:
                #this needs to be refined, take care of the HTML and make sure it doesn't match beyond the blurb.
                #Only finds the first image, not all of them
                person.blurb = re.search('<body>.*<hr', personPageOld, re.DOTALL).group()
            else:
                print "ERROR: --------------- Broken link or Blurb parse error in ", mugShotFilename
        #for mugShotFilename in re.findall('i/.*?jpg', personPageOld, re.DOTALL):
        #    mugShotPath = os.path.join(settings.EXPOWEB, "folk", mugShotFilename)
        #    saveMugShot(mugShotPath=mugShotPath, mugShotFilename=mugShotFilename, person=person)
    person.save()

def LoadPersonsExpos():

    persontab = open(os.path.join(settings.EXPOWEB, "noinfo", "folk.csv"))
    persontab = open(os.path.join(settings.EXPOWEB, "folk", "folk.csv"))
    personreader = csv.reader(persontab)
    headers = personreader.next()
    header = dict(zip(headers, range(len(headers))))

    # make expeditions
    print "Loading expeditions"
    print(" - Loading expeditions")
    years = headers[5:]

    for year in years:
@@ -59,22 +68,35 @@ def LoadPersonsExpos():

        save_carefully(models.Expedition, lookupAttribs, nonLookupAttribs)


    # make persons
    print "Loading personexpeditions"
    #expoers2008 = """Edvin Deadman,Kathryn Hopkins,Djuke Veldhuis,Becka Lawson,Julian Todd,Natalie Uomini,Aaron Curtis,Tony Rooke,Ollie Stevens,Frank Tully,Martin Jahnke,Mark Shinwell,Jess Stirrups,Nial Peters,Serena Povia,Olly Madge,Steve Jones,Pete Harley,Eeva Makiranta,Keith Curtis""".split(",")
    #expomissing = set(expoers2008)
    print(" - Loading personexpeditions")

    for personline in personreader:
        name = personline[header["Name"]]
        name = re.sub("<.*?>", "", name)
        mname = re.match("(\w+)(?:\s((?:van |ten )?\w+))?(?:\s\(([^)]*)\))?", name)
        nickname = mname.group(3) or ""

        lookupAttribs = {'first_name':mname.group(1), 'last_name':(mname.group(2) or "")}
        nonLookupAttribs = {'is_vfho':personline[header["VfHO member"]],}
        name = re.sub(r"<.*?>", "", name)

        firstname = ""
        nickname = ""

        rawlastname = personline[header["Lastname"]].strip()
        matchlastname = re.match(r"^([\w&;\s]+)(?:\(([^)]*)\))?", rawlastname)
        lastname = matchlastname.group(1).strip()

        splitnick = re.match(r"^([\w&;\s]+)(?:\(([^)]*)\))?", name)
        fullname = splitnick.group(1)

        nickname = splitnick.group(2) or ""

        fullname = fullname.strip()
        names = fullname.split(' ')
        firstname = names[0]
        if len(names) == 1:
            lastname = ""

        lookupAttribs = {'first_name':firstname, 'last_name':(lastname or "")}
        nonLookupAttribs = {'is_vfho':personline[header["VfHO member"]], 'fullname':fullname}
        person, created = save_carefully(models.Person, lookupAttribs, nonLookupAttribs)

        parseMugShotAndBlurb(personline=personline, header=header, person=person)

        # make person expedition from table
@@ -88,6 +110,8 @@ def LoadPersonsExpos():

    # this fills in those people for whom 2008 was their first expo
    #print "Loading personexpeditions 2008"
    #expoers2008 = """Edvin Deadman,Kathryn Hopkins,Djuke Veldhuis,Becka Lawson,Julian Todd,Natalie Uomini,Aaron Curtis,Tony Rooke,Ollie Stevens,Frank Tully,Martin Jahnke,Mark Shinwell,Jess Stirrups,Nial Peters,Serena Povia,Olly Madge,Steve Jones,Pete Harley,Eeva Makiranta,Keith Curtis""".split(",")
    #expomissing = set(expoers2008)
    #for name in expomissing:
    #    firstname, lastname = name.split()
    #    is_guest = name in ["Eeva Makiranta", "Keith Curtis"]
@@ -103,18 +127,6 @@ def LoadPersonsExpos():
    #    personexpedition = models.PersonExpedition(person=person, expedition=expedition, nickname="", is_guest=is_guest)
    #    personexpedition.save()

    #Notability is now a method of person. Makes no sense to store it in the database; it would need to be recalculated every time something changes. - AC 16 Feb 09
    # could rank according to surveying as well
    #print "Setting person notability"
    #for person in models.Person.objects.all():
        #person.notability = 0.0
        #for personexpedition in person.personexpedition_set.all():
            #if not personexpedition.is_guest:
                #person.notability += 1.0 / (2012 - int(personexpedition.expedition.year))
        #person.bisnotable = person.notability > 0.3 # I don't know how to filter by this
        #person.save()


# used in other referencing parser functions
# expedition name lookup cached for speed (it's a very big list)
Gpersonexpeditionnamelookup = { }
@@ -127,20 +139,33 @@ def GetPersonExpeditionNameLookup(expedition):
    res = { }
    duplicates = set()

    print "Calculating GetPersonExpeditionNameLookup for", expedition.year
    #print("Calculating GetPersonExpeditionNameLookup for " + expedition.year)
    personexpeditions = models.PersonExpedition.objects.filter(expedition=expedition)
    htmlparser = HTMLParser()
    for personexpedition in personexpeditions:
        possnames = [ ]
        f = personexpedition.person.first_name.lower()
        l = personexpedition.person.last_name.lower()
        f = unidecode(htmlparser.unescape(personexpedition.person.first_name.lower()))
        l = unidecode(htmlparser.unescape(personexpedition.person.last_name.lower()))
        full = unidecode(htmlparser.unescape(personexpedition.person.fullname.lower()))
        if l:
            possnames.append(f + " " + l)
            possnames.append(f + " " + l[0])
            possnames.append(f + l[0])
            possnames.append(f[0] + " " + l)
        possnames.append(f)
        if personexpedition.nickname:
        if full not in possnames:
            possnames.append(full)
        if personexpedition.nickname not in possnames:
            possnames.append(personexpedition.nickname.lower())
            if l:
                # This allows for nickname to be used for short name eg Phil
                # adding Phil Sargent to the list
                if str(personexpedition.nickname.lower() + " " + l) not in possnames:
                    possnames.append(personexpedition.nickname.lower() + " " + l)
                if str(personexpedition.nickname.lower() + " " + l[0]) not in possnames:
                    possnames.append(personexpedition.nickname.lower() + " " + l[0])
                if str(personexpedition.nickname.lower() + l[0]) not in possnames:
                    possnames.append(personexpedition.nickname.lower() + l[0])
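        # Illustrative aside (hypothetical name): the unescape + unidecode pair above
        # folds HTML entities and accents so lookups match plain-ASCII logbook spellings:
        #   htmlparser.unescape(u"fran&#231;ois")  ->  u"fran\xe7ois"
        #   unidecode(u"fran\xe7ois")              ->  "francois"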

        for possname in possnames:
            if possname in res:

parsers/survex.py (579 changed lines; Normal file → Executable file)
@@ -1,41 +1,67 @@
from __future__ import absolute_import, division, print_function

import os
import re
import sys
import time
from datetime import datetime, timedelta
from subprocess import PIPE, Popen, call

from django.utils.timezone import get_current_timezone, make_aware

import troggle.settings as settings
import troggle.core.models as models
import troggle.settings as settings

from subprocess import call, Popen, PIPE

import troggle.core.models_survex as models_survex
from troggle.parsers.people import GetPersonExpeditionNameLookup
import re
import os
from troggle.core.views_caves import MapLocations

"""A 'survex block' is a *begin...*end set of cave data.
A 'survexscansfolder' is what we today call a "survey scans folder" or a "wallet".
"""

def LoadSurvexLineLeg(survexblock, stardata, sline, comment):
line_leg_regex = re.compile(r"[\d\-+.]+$")
survexlegsalllength = 0.0
survexlegsnumber = 0

def LoadSurvexLineLeg(survexblock, stardata, sline, comment, cave):
    global survexlegsalllength
    global survexlegsnumber
    # The try catches here need replacing as they are relatively expensive
    ls = sline.lower().split()
    ssfrom = survexblock.MakeSurvexStation(ls[stardata["from"]])
    ssto = survexblock.MakeSurvexStation(ls[stardata["to"]])

    survexleg = models.SurvexLeg(block=survexblock, stationfrom=ssfrom, stationto=ssto)
    # this next fails for two surface survey svx files which use / for decimal point
    # e.g. '29/09' in the tape measurement, or use decimals but in brackets, e.g. (06.05)
    if stardata["type"] == "normal":
        try:
            survexleg.tape = float(ls[stardata["tape"]])
        except ValueError:
            print("Tape misread in", survexblock.survexfile.path)
            print("Stardata:", stardata)
            print("Line:", ls)
            survexleg.tape = 1000
            survexlegsnumber += 1
        except ValueError:
            print("! Tape misread in", survexblock.survexfile.path)
            print("  Stardata:", stardata)
            print("  Line:", ls)
            message = ' ! Value Error: Tape misread in line %s in %s' % (ls, survexblock.survexfile.path)
            models.DataIssue.objects.create(parser='survex', message=message)
            survexleg.tape = 0
        try:
            lclino = ls[stardata["clino"]]
        except:
            print("Clino misread in", survexblock.survexfile.path)
            print("Stardata:", stardata)
            print("Line:", ls)
            print("! Clino misread in", survexblock.survexfile.path)
            print("  Stardata:", stardata)
            print("  Line:", ls)
            message = ' ! Value Error: Clino misread in line %s in %s' % (ls, survexblock.survexfile.path)
            models.DataIssue.objects.create(parser='survex', message=message)
            lclino = error
        try:
            lcompass = ls[stardata["compass"]]
        except:
            print("Compass misread in", survexblock.survexfile.path)
            print("Stardata:", stardata)
            print("Line:", ls)
            print("! Compass misread in", survexblock.survexfile.path)
            print("  Stardata:", stardata)
            print("  Line:", ls)
            message = ' ! Value Error: Compass misread in line %s in %s' % (ls, survexblock.survexfile.path)
            models.DataIssue.objects.create(parser='survex', message=message)
            lcompass = error
        if lclino == "up":
            survexleg.compass = 0.0
@@ -47,27 +73,37 @@ def LoadSurvexLineLeg(survexblock, stardata, sline, comment):
            try:
                survexleg.compass = float(lcompass)
            except ValueError:
                print("Compass misread in", survexblock.survexfile.path)
                print("Stardata:", stardata)
                print("Line:", ls)
                print("! Compass misread in", survexblock.survexfile.path)
                print("  Stardata:", stardata)
                print("  Line:", ls)
                message = ' ! Value Error: line %s in %s' % (ls, survexblock.survexfile.path)
                models.DataIssue.objects.create(parser='survex', message=message)
                survexleg.compass = 1000
                survexleg.clino = -90.0
        else:
            assert re.match(r"[\d\-+.]+$", lcompass), ls
            assert re.match(r"[\d\-+.]+$", lclino) and lclino != "-", ls
            assert line_leg_regex.match(lcompass), ls
            assert line_leg_regex.match(lclino) and lclino != "-", ls
            survexleg.compass = float(lcompass)
            survexleg.clino = float(lclino)

    if cave:
        survexleg.cave = cave

    # only save proper legs
    survexleg.save()

    # No need to save as we are measuring lengths only on parsing now.
    # delete the object so that django autosaving doesn't save it.
    survexleg = None
    #survexleg.save()

    itape = stardata.get("tape")
    if itape:
        try:
            survexblock.totalleglength += float(ls[itape])
            survexlegsalllength += float(ls[itape])
        except ValueError:
            print("Length not added")
            survexblock.save()
            print("! Length not added")
            # No need to save as we are measuring lengths only on parsing now.
            #survexblock.save()
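# Illustrative aside (hypothetical leg): with stardatadefault, a survex line such as
#   "p7  p8  5.37  031  -14"
# splits so that ls[stardata["from"]] == "p7", ls[stardata["to"]] == "p8",
# ls[stardata["tape"]] == "5.37", ls[stardata["compass"]] == "031" and
# ls[stardata["clino"]] == "-14" - which is what LoadSurvexLineLeg() above indexes into.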

def LoadSurvexEquate(survexblock, sline):
@@ -80,96 +116,286 @@ def LoadSurvexEquate(survexblock, sline):

def LoadSurvexLinePassage(survexblock, stardata, sline, comment):
    pass

stardatadefault = {"type":"normal", "t":"leg", "from":0, "to":1, "tape":2, "compass":3, "clino":4}
stardataparamconvert = {"length":"tape", "bearing":"compass", "gradient":"clino"}

regex_comment = re.compile(r"([^;]*?)\s*(?:;\s*(.*))?\n?$")
regex_ref = re.compile(r'.*?ref.*?(\d+)\s*#\s*(X)?\s*(\d+)')
regex_star = re.compile(r'\s*\*[\s,]*(\w+)\s*(.*?)\s*(?:;.*)?$')
# years from 1960 to 2039
regex_starref = re.compile(r'^\s*\*ref[\s.:]*((?:19[6789]\d)|(?:20[0123]\d))\s*#?\s*(X)?\s*(.*?\d+.*?)$(?i)')
# regex_starref = re.compile("""?x   # VERBOSE mode - can't get this to work
#     ^\s*\*ref                      # look for *ref at start of line
#     [\s.:]*                        # some spaces, stops or colons
#     ((?:19[6789]\d)|(?:20[0123]\d))  # a date from 1960 to 2039 - captured as one field
#     \s*#                           # spaces then hash separator
#     ?\s*(X)                        # optional X - captured
#     ?\s*(.*?\d+.*?)                # maybe a space, then at least one digit in the string - captured
#     $(?i)""", re.X)                # the end (do the whole thing case insensitively)

regex_team = re.compile(r"(Insts|Notes|Tape|Dog|Useless|Pics|Helper|Disto|Consultant)\s+(.*)$(?i)")
regex_team_member = re.compile(r" and | / |, | & | \+ |^both$|^none$(?i)")
regex_qm = re.compile(r'^\s*QM(\d)\s+?([a-dA-DxX])\s+([\w\-]+)\.(\d+)\s+(([\w\-]+)\.(\d+)|\-)\s+(.+)$')

insp = ""
callcount = 0
def RecursiveLoad(survexblock, survexfile, fin, textlines):
    """Follows the *include links in all the survex files from the root file 1623.svx
    and reads in the survex blocks, other data and the wallet references (survexscansfolder) as it
    goes. This part of the data import process is where the maximum memory is used and where it
    crashes on memory-constrained machines.
    """
    iblankbegins = 0
    text = [ ]
    stardata = stardatadefault
    teammembers = [ ]

    # uncomment to print out all files during parsing
    print("Reading file:", survexblock.survexfile.path)
    while True:
        svxline = fin.readline().decode("latin1")
        if not svxline:
            return
        textlines.append(svxline)

    global insp
    global callcount
    global survexlegsnumber

    # uncomment to print out all files during parsing
    print(insp+" - Reading file: " + survexblock.survexfile.path + " <> " + survexfile.path)
    stamp = datetime.now()
    lineno = 0

    sys.stderr.flush();
    callcount += 1
    if callcount >= 10:
        callcount = 0
        print(".", file=sys.stderr, end='')

    # Try to find the cave in the DB; if not, use the string as before
    path_match = re.search(r"caves-(\d\d\d\d)/(\d+|\d\d\d\d-?\w+-\d+)/", survexblock.survexfile.path)
    if path_match:
        pos_cave = '%s-%s' % (path_match.group(1), path_match.group(2))
        # print(insp+'Match')
        # print(insp+pos_cave)
        cave = models.getCaveByReference(pos_cave)
        if cave:
            survexfile.cave = cave
    svxlines = ''
    svxlines = fin.read().splitlines()
    # print(insp+'Cave - preloop ' + str(survexfile.cave))
    # print(insp+survexblock)
    for svxline in svxlines:

        # print(insp+survexblock)
        # print(insp+svxline)
        # if not svxline:
        #     print(insp+' - Not survex')
        #     return
        # textlines.append(svxline)

        lineno += 1

        # print(insp+' - Line: %d' % lineno)

        # break the line at the comment
        sline, comment = re.match(r"([^;]*?)\s*(?:;\s*(.*))?\n?$", svxline.strip()).groups()
        sline, comment = regex_comment.match(svxline.strip()).groups()
        # detect ref line pointing to the scans directory
        mref = comment and re.match(r'.*?ref.*?(\d+)\s*#\s*(\d+)', comment)
        mref = comment and regex_ref.match(comment)
        if mref:
            refscan = "%s#%s" % (mref.group(1), mref.group(2))
            yr, letterx, wallet = mref.groups()
            if not letterx:
                letterx = ""
            else:
                letterx = "X"
            if len(wallet) < 2:
                wallet = "0" + wallet
            refscan = "%s#%s%s" % (yr, letterx, wallet)
            #print(insp+' - Wallet ;ref - %s - looking for survexscansfolder' % refscan)
            survexscansfolders = models.SurvexScansFolder.objects.filter(walletname=refscan)
            if survexscansfolders:
                survexblock.survexscansfolder = survexscansfolders[0]
                #survexblock.refscandir = "%s/%s%%23%s" % (mref.group(1), mref.group(1), mref.group(2))
                survexblock.save()
            continue

                survexblock.save()
                # print(insp+' - Wallet ; ref - %s - found in survexscansfolders' % refscan)
            else:
                message = ' ! Wallet ; ref - %s - NOT found in survexscansfolders %s-%s-%s' % (refscan, yr, letterx, wallet)
                print(insp+message)
                models.DataIssue.objects.create(parser='survex', message=message)

        # This whole section should be moved if we can have *QM become a proper survex command
        # Spec of QM in SVX files, currently commented out need to add to survex
        # needs to match regex_qm
        # ;Serial number  grade(A/B/C/D/X)  nearest-station  resolution-station  description
        # ;QM1 a hobnob_hallway_2.42 hobnob-hallway_3.42 junction of keyhole passage
        # ;QM1 a hobnob_hallway_2.42 - junction of keyhole passage
        qmline = comment and regex_qm.match(comment)
        if qmline:
            # print(insp+qmline.groups())
            #(u'1', u'B', u'miraclemaze', u'1.17', u'-', None, u'\tcontinuation of rift')
            qm_no = qmline.group(1)
            qm_grade = qmline.group(2)
            qm_from_section = qmline.group(3)
            qm_from_station = qmline.group(4)
            qm_resolve_section = qmline.group(6)
            qm_resolve_station = qmline.group(7)
            qm_notes = qmline.group(8)

            # print(insp+'Cave - %s' % survexfile.cave)
            # print(insp+'QM no %d' % int(qm_no))
            # print(insp+'QM grade %s' % qm_grade)
            # print(insp+'QM section %s' % qm_from_section)
            # print(insp+'QM station %s' % qm_from_station)
            # print(insp+'QM res section %s' % qm_resolve_section)
            # print(insp+'QM res station %s' % qm_resolve_station)
            # print(insp+'QM notes %s' % qm_notes)

            # If the QM isn't resolved (has a resolving station) then load it
            if not qm_resolve_section or qm_resolve_section is not '-' or qm_resolve_section is not 'None':
                from_section = models.SurvexBlock.objects.filter(name=qm_from_section)
                # If we can find a section (survex note chunk, named)
                if len(from_section) > 0:
                    # print(insp+from_section[0])
                    from_station = models.SurvexStation.objects.filter(block=from_section[0], name=qm_from_station)
                    # If we can find a from station then we have the nearest station and can import it
                    if len(from_station) > 0:
                        # print(insp+from_station[0])
                        qm = models.QM.objects.create(number=qm_no,
                                                      nearest_station=from_station[0],
                                                      grade=qm_grade.upper(),
                                                      location_description=qm_notes)
            else:
                # print(insp+' - QM found but resolved')
                pass
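        # Illustrative aside (hypothetical comment line): regex_qm applied to
        #   "QM1 a hobnob_hallway_2.42 - continuation of rift"
        # gives group(1)='1' (number), group(2)='a' (grade),
        # group(3)='hobnob_hallway_2' and group(4)='42' (nearest station),
        # group(5)='-' (unresolved, groups 6 and 7 are None),
        # group(8)='continuation of rift' (description).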

        #print(insp+'Cave -sline ' + str(cave))
        if not sline:
            continue

        # detect the star ref command
        mstar = regex_starref.match(sline)
        if mstar:
            yr, letterx, wallet = mstar.groups()
            if not letterx:
                letterx = ""
            else:
                letterx = "X"
            if len(wallet) < 2:
                wallet = "0" + wallet
            assert (int(yr) > 1960 and int(yr) < 2039), "Wallet year out of bounds: %s" % yr
            assert (int(wallet) < 100), "Wallet number more than 100: %s" % wallet
            refscan = "%s#%s%s" % (yr, letterx, wallet)
            survexscansfolders = models.SurvexScansFolder.objects.filter(walletname=refscan)
            if survexscansfolders:
                survexblock.survexscansfolder = survexscansfolders[0]
                survexblock.save()
                # print(insp+' - Wallet *REF - %s - found in survexscansfolders' % refscan)
            else:
                message = ' ! Wallet *REF - %s - NOT found in survexscansfolders %s-%s-%s' % (refscan, yr, letterx, wallet)
                print(insp+message)
                models.DataIssue.objects.create(parser='survex', message=message)
            continue

        # detect the star command
        mstar = re.match(r'\s*\*[\s,]*(\w+)\s*(.*?)\s*(?:;.*)?$', sline)
        mstar = regex_star.match(sline)
        if not mstar:
            if "from" in stardata:
                LoadSurvexLineLeg(survexblock, stardata, sline, comment)
                # print(insp+'Cave ' + str(survexfile.cave))
                # print(insp+survexblock)
                LoadSurvexLineLeg(survexblock, stardata, sline, comment, survexfile.cave)
                # print(insp+' - From: ')
                # print(insp+stardata)
                pass
            elif stardata["type"] == "passage":
                LoadSurvexLinePassage(survexblock, stardata, sline, comment)
                # print(insp+' - Passage: ')
            #Missing "station" in stardata.
            continue

        # detect the star command
        cmd, line = mstar.groups()
        cmd = cmd.lower()
        if re.match("include$(?i)", cmd):
            includepath = os.path.join(os.path.split(survexfile.path)[0], re.sub(r"\.svx$", "", line))
            includesurvexfile = models.SurvexFile(path=includepath, cave=survexfile.cave)
            print(insp+' - Include path found including - ' + includepath)
            # Try to find the cave in the DB; if not, use the string as before
            path_match = re.search(r"caves-(\d\d\d\d)/(\d+|\d\d\d\d-?\w+-\d+)/", includepath)
            if path_match:
                pos_cave = '%s-%s' % (path_match.group(1), path_match.group(2))
                # print(insp+pos_cave)
                cave = models.getCaveByReference(pos_cave)
                if cave:
                    survexfile.cave = cave
            else:
                print(insp+' - No match in DB (i) for %s, so loading..' % includepath)
            includesurvexfile = models.SurvexFile(path=includepath)
            includesurvexfile.save()
            includesurvexfile.SetDirectory()
            if includesurvexfile.exists():
                survexblock.save()
                fininclude = includesurvexfile.OpenFile()
                insp += "> "
                RecursiveLoad(survexblock, includesurvexfile, fininclude, textlines)
                insp = insp[2:]

        elif re.match("begin$(?i)", cmd):
            if line:
            if line:
                newsvxpath = os.path.join(os.path.split(survexfile.path)[0], re.sub(r"\.svx$", "", line))
                # Try to find the cave in the DB; if not, use the string as before
                path_match = re.search(r"caves-(\d\d\d\d)/(\d+|\d\d\d\d-?\w+-\d+)/", newsvxpath)
                if path_match:
                    pos_cave = '%s-%s' % (path_match.group(1), path_match.group(2))
                    # print(insp+pos_cave)
                    cave = models.getCaveByReference(pos_cave)
                    if cave:
                        survexfile.cave = cave
                else:
                    print(insp+' - No match (b) for %s' % newsvxpath)

                previousnlegs = survexlegsnumber
                name = line.lower()
                survexblockdown = models.SurvexBlock(name=name, begin_char=fin.tell(), parent=survexblock, survexpath=survexblock.survexpath+"."+name, cave=survexblock.cave, survexfile=survexfile, totalleglength=0.0)
                print(insp+' - Begin found for: ' + name)
                # print(insp+'Block cave: ' + str(survexfile.cave))
                survexblockdown = models.SurvexBlock(name=name, begin_char=fin.tell(), parent=survexblock, survexpath=survexblock.survexpath+"."+name, cave=survexfile.cave, survexfile=survexfile, totalleglength=0.0)
                survexblockdown.save()
                survexblock.save()
                survexblock = survexblockdown
                # print(insp+survexblockdown)
                textlinesdown = [ ]
                insp += "> "
                RecursiveLoad(survexblockdown, survexfile, fin, textlinesdown)
                insp = insp[2:]
            else:
                iblankbegins += 1

        elif re.match("end$(?i)", cmd):
            if iblankbegins:
                iblankbegins -= 1
            else:
                survexblock.text = "".join(textlines)
                #survexblock.text = "".join(textlines)
                # .text not used, using it for number of legs per block
                legsinblock = survexlegsnumber - previousnlegs
                print("LEGS: {} (previous: {}, now:{})".format(legsinblock, previousnlegs, survexlegsnumber))
                survexblock.text = str(legsinblock)
                survexblock.save()
                # print(insp+' - End found: ')
                endstamp = datetime.now()
                timetaken = endstamp - stamp
                # print(insp+' - Time to process: ' + str(timetaken))
                return

        elif re.match("date$(?i)", cmd):
            if len(line) == 10:
                survexblock.date = re.sub(r"\.", "-", line)
                #print(insp+' - Date found: ' + line)
                survexblock.date = make_aware(datetime.strptime(re.sub(r"\.", "-", line), '%Y-%m-%d'), get_current_timezone())
                expeditions = models.Expedition.objects.filter(year=line[:4])
                if expeditions:
                    assert len(expeditions) == 1
                    survexblock.expedition = expeditions[0]
                    survexblock.expeditionday = survexblock.expedition.get_expedition_day(survexblock.date)
                    survexblock.save()
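        # Illustrative aside (hypothetical line): "*date 2017.07.29" becomes
        #   make_aware(datetime.strptime("2017-07-29", '%Y-%m-%d'), get_current_timezone())
        # i.e. a timezone-aware datetime that get_expedition_day() above can
        # bucket into an expedition day.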

        elif re.match("team$(?i)", cmd):
            mteammember = re.match(r"(Insts|Notes|Tape|Dog|Useless|Pics|Helper|Disto|Consultant)\s+(.*)$(?i)", line)
            pass
            # print(insp+' - Team found: ')
            mteammember = regex_team.match(line)
            if mteammember:
                for tm in re.split(r" and | / |, | & | \+ |^both$|^none$(?i)", mteammember.group(2)):
                for tm in regex_team_member.split(mteammember.group(2)):
                    if tm:
                        personexpedition = survexblock.expedition and GetPersonExpeditionNameLookup(survexblock.expedition).get(tm.lower())
                        if (personexpedition, tm) not in teammembers:
@@ -179,18 +405,23 @@ def RecursiveLoad(survexblock, survexfile, fin, textlines):
                            if personexpedition:
                                personrole.person = personexpedition.person
                            personrole.save()

        elif cmd == "title":
            survextitle = models.SurvexTitle(survexblock=survexblock, title=line.strip('"'), cave=survexblock.cave)
            #print(insp+' - Title found: ')
            survextitle = models.SurvexTitle(survexblock=survexblock, title=line.strip('"'), cave=survexfile.cave)
            survextitle.save()

            pass

        elif cmd == "require":
            # should we check survex version available for processing?
            pass

        elif cmd == "data":
            #print(insp+' - Data found: ')
            ls = line.lower().split()
            stardata = { "type":ls[0] }
            #print(insp+' - Star data: ', stardata)
            #print(insp+ls)
            for i in range(0, len(ls)):
                stardata[stardataparamconvert.get(ls[i], ls[i])] = i - 1
            if ls[0] in ["normal", "cartesian", "nosurvey"]:
@@ -199,44 +430,32 @@ def RecursiveLoad(survexblock, survexfile, fin, textlines):
                stardata = stardatadefault
            else:
                assert ls[0] == "passage", line

        elif cmd == "equate":
            #print(insp+' - Equate found: ')
            LoadSurvexEquate(survexblock, line)

        elif cmd == "fix":
            #print(insp+' - Fix found: ')
            survexblock.MakeSurvexStation(line.split()[0])

        else:
            #print(insp+' - Stuff')
            if cmd not in ["sd", "include", "units", "entrance", "data", "flags", "title", "export", "instrument",
                           "calibrate", "set", "infer", "alias", "ref", "cs", "declination", "case"]:
                print("Unrecognised command in line:", cmd, line, survexblock, survexblock.survexfile.path)


def ReloadSurvexCave(survex_cave, area):
    print(survex_cave, area)
    cave = models.Cave.objects.get(kataster_number=survex_cave, area__short_name=area)
    print(cave)
    #cave = models.Cave.objects.get(kataster_number=survex_cave)
    cave.survexblock_set.all().delete()
    cave.survexfile_set.all().delete()
    cave.survexdirectory_set.all().delete()

    survexfile = models.SurvexFile(path="caves-" + cave.kat_area() + "/" + survex_cave + "/" + survex_cave, cave=cave)
    survexfile.save()
    survexfile.SetDirectory()

    survexblockroot = models.SurvexBlock(name="root", survexpath="caves-" + cave.kat_area(), begin_char=0, cave=cave, survexfile=survexfile, totalleglength=0.0)
    survexblockroot.save()
    fin = survexfile.OpenFile()
    textlines = [ ]
    RecursiveLoad(survexblockroot, survexfile, fin, textlines)
    survexblockroot.text = "".join(textlines)
    survexblockroot.save()
                           "calibrate", "set", "infer", "alias", "cs", "declination", "case"]:
                message = "! Bad svx command in line:%s %s %s %s" % (cmd, line, survexblock, survexblock.survexfile.path)
                print(insp+message)
                models.DataIssue.objects.create(parser='survex', message=message)

    endstamp = datetime.now()
    timetaken = endstamp - stamp
    # print(insp+' - Time to process: ' + str(timetaken))
def LoadAllSurvexBlocks():
    global survexlegsalllength
    global survexlegsnumber

    print('Loading All Survex Blocks...')
    print(' - Flushing All Survex Blocks...')

    models.SurvexBlock.objects.all().delete()
    models.SurvexFile.objects.all().delete()
@@ -248,52 +467,172 @@ def LoadAllSurvexBlocks():
    models.SurvexStation.objects.all().delete()

    print(" - Data flushed")
    # Clear the data issues as we are reloading
    models.DataIssue.objects.filter(parser='survex').delete()
    print(' - Loading All Survex Blocks...')

    print(' - redirecting stdout to loadsurvexblks.log...')
    stdout_orig = sys.stdout
    # Redirect sys.stdout to the file
    sys.stdout = open('loadsurvexblks.log', 'w')

    survexfile = models.SurvexFile(path="all", cave=None)
    survexfile = models.SurvexFile(path=settings.SURVEX_TOPNAME, cave=None)
    survexfile.save()
    survexfile.SetDirectory()

    #Load all
    # this is the first so id=1
    survexblockroot = models.SurvexBlock(name="root", survexpath="", begin_char=0, cave=None, survexfile=survexfile, totalleglength=0.0)
    survexblockroot.save()
    fin = survexfile.OpenFile()
    textlines = [ ]
    # The real work starts here
    RecursiveLoad(survexblockroot, survexfile, fin, textlines)
    survexblockroot.text = "".join(textlines)
    fin.close()
    survexblockroot.totalleglength = survexlegsalllength
    survexblockroot.text = str(survexlegsnumber)
    #survexblockroot.text = "".join(textlines) these are all blank
    survexblockroot.save()

    #Load each cave,
    #FIXME this should be dealt with load all above
    print(" - Reloading all caves")
    caves = models.Cave.objects.all()
    for cave in caves:
        if cave.kataster_number and os.path.isdir(os.path.join(settings.SURVEX_DATA, "caves-" + cave.kat_area(), cave.kataster_number)):
            if cave.kataster_number not in ['40']:
                print("loading", cave, cave.kat_area())
                ReloadSurvexCave(cave.kataster_number, cave.kat_area())

    # Close the file
    sys.stdout.close()
    print("+", file=sys.stderr)
    sys.stderr.flush();

    # Restore sys.stdout to our old saved file handler
    sys.stdout = stdout_orig
    print(" - total number of survex legs: {}".format(survexlegsnumber))
    print(" - total leg lengths loaded: {}m".format(survexlegsalllength))
    print(' - Loaded All Survex Blocks.')


poslineregex = re.compile(r"^\(\s*([+-]?\d*\.\d*),\s*([+-]?\d*\.\d*),\s*([+-]?\d*\.\d*)\s*\)\s*([^\s]+)$")
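# Illustrative aside (hypothetical station): a 3dtopos output line such as
#   (   36670.37,   83317.43,  1903.97 ) 204.trunk.13
# matches poslineregex with groups ('36670.37', '83317.43', '1903.97', '204.trunk.13').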
|
||||
|
||||
|
||||
def LoadPos():
|
||||
|
||||
print('Loading Pos....')
|
||||
|
||||
call([settings.CAVERN, "--output=%s/all.3d" % settings.SURVEX_DATA, "%s/all.svx" % settings.SURVEX_DATA])
|
||||
call([settings.THREEDTOPOS, '%sall.3d' % settings.SURVEX_DATA], cwd = settings.SURVEX_DATA)
|
||||
posfile = open("%sall.pos" % settings.SURVEX_DATA)
|
||||
posfile.readline() #Drop header
|
||||
for line in posfile.readlines():
|
||||
r = poslineregex.match(line)
|
||||
if r:
|
||||
x, y, z, name = r.groups()
|
||||
"""Run cavern to produce a complete .3d file, then run 3dtopos to produce a table of
|
||||
all survey point positions. Then lookup each position by name to see if we have it in the database
|
||||
and if we do, then save the x/y/z coordinates.
|
||||
If we don't have it in the database, print an error message and discard it.
|
||||
"""
|
||||
topdata = settings.SURVEX_DATA + settings.SURVEX_TOPNAME
|
||||
print(' - Generating a list of Pos from %s.svx and then loading...' % (topdata))
|
||||
|
||||
# Be careful with the cache file.
|
||||
# If LoadPos has been run before,
|
||||
# but without cave import being run before,
|
||||
# then *everything* may be in the fresh 'not found' cache file.
|
||||
|
||||
cachefile = settings.SURVEX_DATA + "posnotfound.cache"
|
||||
notfoundbefore = {}
if os.path.isfile(cachefile):
    # this is not a good test. 1623.svx may never change but *included files may have done.
    # When the *include is unrolled, we will be able to get a proper timestamp to use
    # and can increase the timeout from 3 days to 30 days.
    updtsvx = os.path.getmtime(topdata + ".svx")
    updtcache = os.path.getmtime(cachefile)
    age = updtcache - updtsvx
    print('   svx: %s  cache: %s  not-found cache is fresher by: %s' % (updtsvx, updtcache, str(timedelta(seconds=age))))

    now = time.time()
    if now - updtcache > 3*24*60*60:
        print("  cache is more than 3 days old. Deleting.")
        os.remove(cachefile)
    elif age < 0:
        print("  cache is stale. Deleting.")
        os.remove(cachefile)
    else:
        print("  cache is fresh. Reading...")
        try:
            ss = models.SurvexStation.objects.lookup(name)
            ss.x = float(x)
            ss.y = float(y)
            ss.z = float(z)
            ss.save()
            with open(cachefile, "r") as f:
                for line in f:
                    l = line.rstrip()
                    if l in notfoundbefore:
                        notfoundbefore[l] += 1  # should not be duplicates
                        print(" DUPLICATE ", line, notfoundbefore[l])
                    else:
                        notfoundbefore[l] = 1
        except:
            print("%s not parsed in survex" % name)
            print(" FAILURE READ opening cache file %s" % (cachefile))
            raise

notfoundnow = []
found = 0
skip = {}
print("\n")  # extra line because cavern overwrites the text buffer somehow
# cavern defaults to using same cwd as supplied input file
call([settings.CAVERN, "--output=%s.3d" % (topdata), "%s.svx" % (topdata)])
call([settings.THREEDTOPOS, '%s.3d' % (topdata)], cwd=settings.SURVEX_DATA)
print(" - This next bit takes a while. Matching ~32,000 survey positions. Be patient...")

mappoints = {}
for pt in MapLocations().points():
    svxid, number, point_type, label = pt
    mappoints[svxid] = True

posfile = open("%s.pos" % (topdata))
posfile.readline()  # drop the header line

survexblockroot = models_survex.SurvexBlock.objects.get(id=1)
for line in posfile.readlines():
    r = poslineregex.match(line)
    if r:
        x, y, z, id = r.groups()
        if id in notfoundbefore:
            skip[id] = 1
        else:
            for sid in mappoints:
                if id.endswith(sid):
                    notfoundnow.append(id)
                    # Now that we don't import any stations, we create it rather than look it up
                    # ss = models_survex.SurvexStation.objects.lookup(id)

                    # need to set block_id which means doing a search on all the survex blocks..
                    # remove dot at end and add one at beginning
                    blockpath = "." + id[:-len(sid)].strip(".")
                    try:
                        sbqs = models_survex.SurvexBlock.objects.filter(survexpath=blockpath)
                        if len(sbqs) == 1:
                            sb = sbqs[0]
                        if len(sbqs) > 1:
                            message = ' ! MULTIPLE SurvexBlocks matching Entrance point {} {}'.format(blockpath, sid)
                            print(message)
                            models.DataIssue.objects.create(parser='survex', message=message)
                            sb = sbqs[0]
                        elif len(sbqs) <= 0:
                            message = ' ! ZERO SurvexBlocks matching Entrance point {} {}'.format(blockpath, sid)
                            print(message)
                            models.DataIssue.objects.create(parser='survex', message=message)
                            sb = survexblockroot
                    except:
                        message = ' ! FAIL in getting SurvexBlock matching Entrance point {} {}'.format(blockpath, sid)
                        print(message)
                        models.DataIssue.objects.create(parser='survex', message=message)
                    try:
                        ss = models_survex.SurvexStation(name=id, block=sb)
                        ss.x = float(x)
                        ss.y = float(y)
                        ss.z = float(z)
                        ss.save()
                        found += 1
                    except:
                        message = ' ! FAIL to create SurvexStation Entrance point {} {}'.format(blockpath, sid)
                        print(message)
                        models.DataIssue.objects.create(parser='survex', message=message)
                        raise

#print(" - %s failed lookups of SurvexStation.objects. %s found. %s skipped." % (len(notfoundnow), found, len(skip)))

if found > 10:  # i.e. a previous cave import has been done
    try:
        with open(cachefile, "w") as f:
            c = len(notfoundnow) + len(skip)
            for i in notfoundnow:
                pass  # f.write("%s\n" % i)
            for j in skip:
                pass  # f.write("%s\n" % j)  # NB skip, not notfoundbefore
            print(('  Not-found cache file written: %s entries' % c))
    except:
        print(" FAILURE WRITE opening cache file %s" % (cachefile))
        raise

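The 3-day timeout above is a stopgap: as the comment says, 1623.svx itself may never change while the files it *includes do. A minimal sketch of the planned fix, walking the *include tree and taking the newest mtime, might look like this (the helper name and the simple relative-path handling are assumptions, not troggle code):

    import os
    import re

    def latest_mtime(svxpath):
        """Newest mtime of a .svx file and everything it *includes, recursively."""
        newest = os.path.getmtime(svxpath)
        basedir = os.path.dirname(svxpath)
        with open(svxpath) as f:
            for line in f:
                m = re.match(r'\s*\*include\s+(\S+)', line, re.IGNORECASE)
                if m:
                    inc = os.path.join(basedir, m.group(1))
                    if not inc.lower().endswith(".svx"):
                        inc += ".svx"
                    if os.path.isfile(inc):
                        newest = max(newest, latest_mtime(inc))
        return newest

With that, updtsvx = latest_mtime(topdata + ".svx") would make the age comparison trustworthy, and the timeout could then safely grow from 3 to 30 days.
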
@@ -1,16 +1,21 @@
import sys, os, types, logging, stat
#sys.path.append('C:\\Expo\\expoweb')
#from troggle import *
#os.environ['DJANGO_SETTINGS_MODULE']='troggle.settings'
import settings
from troggle.core.models import *
from PIL import Image
#import settings
#import core.models as models
from __future__ import (absolute_import, division,
                        print_function, unicode_literals)

import sys
import os
import types
import logging
import stat
import csv
import re
import datetime

#from PIL import Image
from utils import save_carefully
from functools import reduce

import settings
from troggle.core.models import *

def get_or_create_placeholder(year):
    """ All surveys must be related to a logbookentry. We don't have a way to
@@ -24,142 +29,89 @@ def get_or_create_placeholder(year):
    placeholder_logbook_entry, newly_created = save_carefully(LogbookEntry, lookupAttribs, nonLookupAttribs)
    return placeholder_logbook_entry

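save_carefully (imported from utils) is what makes the placeholder idempotent. Its body is not shown in this diff; a plausible minimal sketch, assuming it is a thin wrapper over Django's get_or_create split into lookup and non-lookup attributes, would be:

    def save_carefully(objectType, lookupAttribs, nonLookupAttribs):
        """Fetch-or-create an object matching lookupAttribs; apply nonLookupAttribs
        either as creation defaults or as an update to the existing row."""
        instance, created = objectType.objects.get_or_create(defaults=nonLookupAttribs, **lookupAttribs)
        if not created:
            for k, v in nonLookupAttribs.items():
                setattr(instance, k, v)
            instance.save()
        return instance, created

The two-tuple return matches the "placeholder_logbook_entry, newly_created = ..." call above.
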
# dead
def readSurveysFromCSV():
    try:  # could probably combine these two
        surveytab = open(os.path.join(settings.SURVEY_SCANS, "Surveys.csv"))
    except IOError:
        import cStringIO, urllib
        surveytab = cStringIO.StringIO(urllib.urlopen(settings.SURVEY_SCANS + "/Surveys.csv").read())
    dialect = csv.Sniffer().sniff(surveytab.read())
    surveytab.seek(0, 0)
    surveyreader = csv.reader(surveytab, dialect=dialect)
    headers = surveyreader.next()
    header = dict(zip(headers, range(len(headers))))  # a dictionary where the keys are header names and the values are column numbers

    # test if the expeditions have been added yet
    if Expedition.objects.count() == 0:
        print("There are no expeditions in the database. Please run the logbook parser.")
        sys.exit()

    logging.info("Deleting all scanned images")
    ScannedImage.objects.all().delete()

    logging.info("Deleting all survey objects")
    Survey.objects.all().delete()

    logging.info("Beginning to import surveys from " + str(os.path.join(settings.SURVEYS, "Surveys.csv")) + "\n" + "-" * 60 + "\n")

    for survey in surveyreader:
        # I hate this, but some surveys have a letter eg 2000#34a. The next line deals with that.
        walletNumberLetter = re.match(r'(?P<number>\d*)(?P<letter>[a-zA-Z]*)', survey[header['Survey Number']])
        # print(walletNumberLetter.groups())
        year = survey[header['Year']]

        surveyobj = Survey(
            expedition=Expedition.objects.filter(year=year)[0],
            wallet_number=walletNumberLetter.group('number'),
            logbook_entry=get_or_create_placeholder(year),
            comments=survey[header['Comments']],
            location=survey[header['Location']]
        )
        surveyobj.wallet_letter = walletNumberLetter.group('letter')
        if survey[header['Finished']] == 'Yes':
            # try and find the sketch_scan
            pass
        surveyobj.save()

        logging.info("added survey " + survey[header['Year']] + "#" + surveyobj.wallet_number + "\r")

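The wallet-number regex above splits a mixed identifier such as 34a into its numeric and letter parts. A quick standalone illustration:

    import re

    m = re.match(r'(?P<number>\d*)(?P<letter>[a-zA-Z]*)', "34a")
    print(m.group('number'), m.group('letter'))   # -> 34 a
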
# dead
def listdir(*directories):
    try:
        return os.listdir(os.path.join(settings.SURVEYS, *directories))
    except:
        import urllib
        import urllib.request, urllib.parse, urllib.error
        url = settings.SURVEYS + reduce(lambda x, y: x + "/" + y, ["listdir"] + list(directories))
        folders = urllib.urlopen(url.replace("#", "%23")).readlines()
        folders = urllib.request.urlopen(url.replace("#", "%23")).readlines()
        return [folder.rstrip(r"/") for folder in folders]

# add survey scans
def parseSurveyScans(expedition, logfile=None):
    # yearFileList = listdir(expedition.year)
    try:
        yearPath = os.path.join(settings.SURVEY_SCANS, "surveyscans", expedition.year)
        yearFileList = os.listdir(yearPath)
        print(yearFileList)
        for surveyFolder in yearFileList:
            try:
                surveyNumber = re.match(r'\d\d\d\d#(X?)0*(\d+)', surveyFolder).groups()
                # scanList = listdir(expedition.year, surveyFolder)
                scanList = os.listdir(os.path.join(yearPath, surveyFolder))
            except AttributeError:
                print(surveyFolder + " ignored\r", )
                continue
# def parseSurveyScans(expedition, logfile=None):
#     # yearFileList = listdir(expedition.year)
#     try:
#         yearPath=os.path.join(settings.SURVEY_SCANS, "surveyscans", expedition.year)
#         yearFileList=os.listdir(yearPath)
#         print(yearFileList)
#         for surveyFolder in yearFileList:
#             try:
#                 surveyNumber=re.match(rb'\d\d\d\d#(X?)0*(\d+)',surveyFolder).groups()
#                 #scanList = listdir(expedition.year, surveyFolder)
#                 scanList=os.listdir(os.path.join(yearPath,surveyFolder))
#             except AttributeError:
#                 print(("Ignoring file in year folder: " + surveyFolder + "\r"))
#                 continue

            for scan in scanList:
                try:
                    scanChopped = re.match(r'(?i).*(notes|elev|plan|elevation|extend)(\d*)\.(png|jpg|jpeg)', scan).groups()
                    scanType, scanNumber, scanFormat = scanChopped
                except AttributeError:
                    print(scan + " ignored\r", )
                    continue
                if scanType == 'elev' or scanType == 'extend':
                    scanType = 'elevation'
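The scan-name regex above classifies files such as elev3.PNG by content type, sheet number and format, which the code then normalises ('elev' and 'extend' both become 'elevation'). A standalone illustration:

    import re

    m = re.match(r'(?i).*(notes|elev|plan|elevation|extend)(\d*)\.(png|jpg|jpeg)', "elev3.PNG")
    print(m.groups())   # -> ('elev', '3', 'PNG')
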
#             for scan in scanList:
#                 # Why does this insist on renaming all the scanned image files?
#                 # It produces duplicate names, and all images have type .jpg in the scanObj.
#                 # It seems to rely on end users being particularly diligent in filenames, which is NGtH
#                 try:
#                     #scanChopped=re.match(rb'(?i).*(notes|elev|plan|extend|elevation)-?(\d*)\.(png|jpg|jpeg|pdf)',scan).groups()
#                     scanChopped=re.match(rb'(?i)([a-z_-]*\d?[a-z_-]*)(\d*)\.(png|jpg|jpeg|pdf|top|dxf|svg|tdr|th2|xml|txt)',scan).groups()
#                     scanType,scanNumber,scanFormat=scanChopped
#                 except AttributeError:
#                     print(("Ignored (bad name format): " + surveyFolder + '/' + scan + "\r"))
#                     continue
#                 scanTest = scanType
#                 scanType = 'notes'
#                 match = re.search(rb'(?i)(elev|extend)',scanTest)
#                 if match:
#                     scanType = 'elevation'

                if scanNumber == '':
                    scanNumber = 1
#                 match = re.search(rb'(?i)(plan)',scanTest)
#                 if match:
#                     scanType = 'plan'

                if type(surveyNumber) == types.TupleType:
                    surveyLetter = surveyNumber[0]
                    surveyNumber = surveyNumber[1]
                try:
                    placeholder = get_or_create_placeholder(year=int(expedition.year))
                    survey = Survey.objects.get_or_create(wallet_number=surveyNumber, wallet_letter=surveyLetter, expedition=expedition, defaults={'logbook_entry': placeholder})[0]
                except Survey.MultipleObjectsReturned:
                    survey = Survey.objects.filter(wallet_number=surveyNumber, wallet_letter=surveyLetter, expedition=expedition)[0]
                file_ = os.path.join(yearPath, surveyFolder, scan)
                scanObj = ScannedImage(
                    file=file_,
                    contents=scanType,
                    number_in_wallet=scanNumber,
                    survey=survey,
                    new_since_parsing=False,
                )
                print("Added scanned image at " + str(scanObj))
                #if scanFormat=="png":
                #    if isInterlacedPNG(os.path.join(settings.SURVEY_SCANS, "surveyscans", file_)):
                #        print file_+ " is an interlaced PNG. No can do."
                #        continue
                scanObj.save()
    except (IOError, OSError):
        yearPath = os.path.join(settings.SURVEY_SCANS, "surveyscans", expedition.year)
        print("No folder found for " + expedition.year + " at:- " + yearPath)
#                 if scanNumber=='':
#                     scanNumber=1

#                 if isinstance(surveyNumber, tuple):
#                     surveyLetter=surveyNumber[0]
#                     surveyNumber=surveyNumber[1]
#                 try:
#                     placeholder=get_or_create_placeholder(year=int(expedition.year))
#                     survey=Survey.objects.get_or_create(wallet_number=surveyNumber, wallet_letter=surveyLetter, expedition=expedition, defaults={'logbook_entry':placeholder})[0]
#                 except Survey.MultipleObjectsReturned:
#                     survey=Survey.objects.filter(wallet_number=surveyNumber, wallet_letter=surveyLetter, expedition=expedition)[0]
#                 file_=os.path.join(yearPath, surveyFolder, scan)
#                 scanObj = ScannedImage(
#                     file=file_,
#                     contents=scanType,
#                     number_in_wallet=scanNumber,
#                     survey=survey,
#                     new_since_parsing=False,
#                 )
#                 print(("Added scanned image at " + str(scanObj)))
#                 #if scanFormat=="png":
#                 #    if isInterlacedPNG(os.path.join(settings.SURVEY_SCANS, "surveyscans", file_)):
#                 #        print file_+ " is an interlaced PNG. No can do."
#                 #        continue
#                 scanObj.save()
#     except (IOError, OSError):
#         yearPath=os.path.join(settings.SURVEY_SCANS, "surveyscans", expedition.year)
#         print((" ! No folder found for " + expedition.year + " at:- " + yearPath))

# dead
def parseSurveys(logfile=None):
    try:
        readSurveysFromCSV()
    except (IOError, OSError):
        print("Survey CSV not found..")
        pass

    for expedition in Expedition.objects.filter(year__gte=2000):  # expos since 2000, because paths and filenames were nonstandard before then
        parseSurveyScans(expedition)

# dead
def isInterlacedPNG(filePath):  # We need to check for interlaced PNGs because the thumbnail engine can't handle them (uses PIL)
    file = Image.open(filePath)
    print(filePath)
    if 'interlace' in file.info:
        return file.info['interlace']
    else:
        return False

# def isInterlacedPNG(filePath): # We need to check for interlaced PNGs because the thumbnail engine can't handle them (uses PIL)
#     file=Image.open(filePath)
#     print(filePath)
#     if 'interlace' in file.info:
#         return file.info['interlace']
#     else:
#         return False

# handles url or file, so we can refer to a set of scans on another server
@@ -167,58 +119,63 @@ def GetListDir(sdir):
    res = []
    if sdir[:7] == "http://":
        assert False, "Not written"
        s = urllib.urlopen(sdir)
        s = urllib.request.urlopen(sdir)
    else:
        for f in os.listdir(sdir):
            if f[0] != ".":
                ff = os.path.join(sdir, f)
                res.append((f, ff, os.path.isdir(ff)))
    return res


def LoadListScansFile(survexscansfolder):
    gld = []

    # flatten out any directories in these book files
    # flatten out any directories in these wallet folders - should not be any
    for (fyf, ffyf, fisdiryf) in GetListDir(survexscansfolder.fpath):
        if fisdiryf:
            gld.extend(GetListDir(ffyf))
        else:
            gld.append((fyf, ffyf, fisdiryf))

    c = 0
    for (fyf, ffyf, fisdiryf) in gld:
        #assert not fisdiryf, ffyf
        if re.search(r"\.(?:png|jpg|jpeg)(?i)$", fyf):
        if re.search(r"\.(?:png|jpg|jpeg|pdf|svg|gif)(?i)$", fyf):
            survexscansingle = SurvexScanSingle(ffile=ffyf, name=fyf, survexscansfolder=survexscansfolder)
            survexscansingle.save()
            c += 1
            if c >= 10:
                print(".", end='')
                c = 0

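A side note on the extension filter above: placing (?i) anywhere but the start of a pattern has been deprecated since Python 3.6 and is an error from Python 3.11, so on a current interpreter the check would need the flag moved or passed explicitly, for example:

    # equivalent, and valid on current Pythons:
    re.search(r"\.(?:png|jpg|jpeg|pdf|svg|gif)$", fyf, re.IGNORECASE)
    # or with a scoped inline flag:
    re.search(r"(?i:\.(?:png|jpg|jpeg|pdf|svg|gif)$)", fyf)
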
# this iterates through the scans directories (either here or on the remote server)
# and builds up the models we can access later
def LoadListScans():

    print('Loading Survey Scans...')
    print(' - Loading Survey Scans')

    SurvexScanSingle.objects.all().delete()
    SurvexScansFolder.objects.all().delete()
    print(' - deleting all scansFolder and scansSingle objects')

    # first do the smkhs (large kh survey scans) directory
    survexscansfoldersmkhs = SurvexScansFolder(fpath=os.path.join(settings.SURVEY_SCANS, "smkhs"), walletname="smkhs")
    survexscansfoldersmkhs = SurvexScansFolder(fpath=os.path.join(settings.SURVEY_SCANS, "../surveys/smkhs"), walletname="smkhs")
    print("smkhs", end=' ')
    if os.path.isdir(survexscansfoldersmkhs.fpath):
        survexscansfoldersmkhs.save()
        LoadListScansFile(survexscansfoldersmkhs)

    # iterate into the surveyscans directory
    for f, ff, fisdir in GetListDir(os.path.join(settings.SURVEY_SCANS, "surveyscans")):
    print(' - ', end=' ')
    for f, ff, fisdir in GetListDir(settings.SURVEY_SCANS):
        if not fisdir:
            continue

        # do the year folders
        if re.match(r"\d\d\d\d$", f):
            print("%s" % f, end=' ')
            for fy, ffy, fisdiry in GetListDir(ff):
                if fisdiry:
                    assert fisdiry, ffy
@@ -235,7 +192,7 @@ def LoadListScans():

def FindTunnelScan(tunnelfile, path):
    scansfolder, scansfile = None, None
    mscansdir = re.search(r"(\d\d\d\d#X?\d+\w?|1995-96kh|92-94Surveybookkh|1991surveybook|smkhs)/(.*?(?:png|jpg))$", path)
    mscansdir = re.search(r"(\d\d\d\d#X?\d+\w?|1995-96kh|92-94Surveybookkh|1991surveybook|smkhs)/(.*?(?:png|jpg|pdf|jpeg))$", path)
    if mscansdir:
        scansfolderl = SurvexScansFolder.objects.filter(walletname=mscansdir.group(1))
        if len(scansfolderl):
@@ -244,8 +201,11 @@ def FindTunnelScan(tunnelfile, path):
            if scansfolder:
                scansfilel = scansfolder.survexscansingle_set.filter(name=mscansdir.group(2))
                if len(scansfilel):
                    print(scansfilel, len(scansfilel))
                    assert len(scansfilel) == 1
                    if len(scansfilel) > 1:
                        print("BORK more than one image filename matches filter query. ", scansfilel[0])
                        print("BORK ", tunnelfile.tunnelpath, path)
                        print("BORK ", mscansdir.group(1), mscansdir.group(2), len(scansfilel))
                        #assert len(scansfilel) == 1
                    scansfile = scansfilel[0]

            if scansfolder:
@@ -253,9 +213,9 @@ def FindTunnelScan(tunnelfile, path):
            if scansfile:
                tunnelfile.survexscans.add(scansfile)

    elif path and not re.search(r"\.(?:png|jpg|jpeg)$(?i)", path):
    elif path and not re.search(r"\.(?:png|jpg|pdf|jpeg)$(?i)", path):
        name = os.path.split(path)[1]
        print("ttt", tunnelfile.tunnelpath, path, name)
        #print("debug-tunnelfileobjects ", tunnelfile.tunnelpath, path, name)
        rtunnelfilel = TunnelFile.objects.filter(tunnelname=name)
        if len(rtunnelfilel):
            assert len(rtunnelfilel) == 1, ("two paths with name of", path, "need more discrimination coded")
@@ -269,19 +229,22 @@ def FindTunnelScan(tunnelfile, path):

def SetTunnelfileInfo(tunnelfile):
    ff = os.path.join(settings.TUNNEL_DATA, tunnelfile.tunnelpath)
    tunnelfile.filesize = os.stat(ff)[stat.ST_SIZE]
    fin = open(ff)
    fin = open(ff, 'rb')
    ttext = fin.read()
    fin.close()

    mtype = re.search("<(fontcolours|sketch)", ttext)
    if tunnelfile.filesize <= 0:
        print("DEBUG - zero length xml file", ff)
        return
    mtype = re.search(r"<(fontcolours|sketch)", ttext)

    assert mtype, ff
    tunnelfile.bfontcolours = (mtype.group(1) == "fontcolours")
    tunnelfile.npaths = len(re.findall("<skpath", ttext))
    tunnelfile.npaths = len(re.findall(r"<skpath", ttext))
    tunnelfile.save()

    # <tunnelxml tunnelversion="version2009-06-21 Matienzo" tunnelproject="ireby" tunneluser="goatchurch" tunneldate="2009-06-29 23:22:17">
    # <pcarea area_signal="frame" sfscaledown="12.282584" sfrotatedeg="-90.76982" sfxtrans="11.676667377221136" sfytrans="-15.677173422877454" sfsketch="204description/scans/plan(38).png" sfstyle="" nodeconnzsetrelative="0.0">
    for path, style in re.findall('<pcarea area_signal="frame".*?sfsketch="([^"]*)" sfstyle="([^"]*)"', ttext):
    for path, style in re.findall(r'<pcarea area_signal="frame".*?sfsketch="([^"]*)" sfstyle="([^"]*)"', ttext):
        FindTunnelScan(tunnelfile, path)

    # should also scan and look for survex blocks that might have been included

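One wrinkle in SetTunnelfileInfo: the new open(ff, 'rb') makes ttext a bytes object, but the re.search/re.findall patterns remain str, which raises TypeError on Python 3 ("cannot use a string pattern on a bytes-like object"). If binary reads are intended, the patterns would need to be bytes too, roughly:

    mtype = re.search(rb"<(fontcolours|sketch)", ttext)
    tunnelfile.bfontcolours = (mtype.group(1) == b"fontcolours")   # compare bytes with bytes
    tunnelfile.npaths = len(re.findall(rb"<skpath", ttext))
    for path, style in re.findall(rb'<pcarea area_signal="frame".*?sfsketch="([^"]*)" sfstyle="([^"]*)"', ttext):
        FindTunnelScan(tunnelfile, path.decode("utf-8"))   # decode before comparing with str paths
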
pathreport.py  (new file, 60 lines)
@@ -0,0 +1,60 @@
#!/usr/bin/python
from settings import *
import sys
import os
import string
import re
import urlparse
import django

pathsdict = {
    "ADMIN_MEDIA_PREFIX": ADMIN_MEDIA_PREFIX,
    "ADMIN_MEDIA_PREFIX": ADMIN_MEDIA_PREFIX,
    "CAVEDESCRIPTIONSX": CAVEDESCRIPTIONS,
    "DIR_ROOT": DIR_ROOT,
    #"EMAIL_HOST": EMAIL_HOST,
    #"EMAIL_HOST_USER": EMAIL_HOST_USER,
    "ENTRANCEDESCRIPTIONS": ENTRANCEDESCRIPTIONS,
    "EXPOUSER_EMAIL": EXPOUSER_EMAIL,
    "EXPOUSERPASS": "<redacted>",
    "EXPOUSER": EXPOUSER,
    "EXPOWEB": EXPOWEB,
    "EXPOWEB_URL": EXPOWEB_URL,
    "FILES": FILES,
    "JSLIB_URL": JSLIB_URL,
    "LOGFILE": LOGFILE,
    "LOGIN_REDIRECT_URL": LOGIN_REDIRECT_URL,
    "MEDIA_ADMIN_DIR": MEDIA_ADMIN_DIR,
    "MEDIA_ROOT": MEDIA_ROOT,
    "MEDIA_URL": MEDIA_URL,
    #"PHOTOS_ROOT": PHOTOS_ROOT,
    "PHOTOS_URL": PHOTOS_URL,
    "PYTHON_PATH": PYTHON_PATH,
    "REPOS_ROOT_PATH": REPOS_ROOT_PATH,
    "ROOT_URLCONF": ROOT_URLCONF,
    "STATIC_ROOT": STATIC_ROOT,
    "STATIC_URL": STATIC_URL,
    "SURVEX_DATA": SURVEX_DATA,
    "SURVEY_SCANS": SURVEY_SCANS,
    "SURVEYS": SURVEYS,
    "SURVEYS_URL": SURVEYS_URL,
    "SVX_URL": SVX_URL,
    "TEMPLATE_DIRS": TEMPLATE_DIRS,
    "THREEDCACHEDIR": THREEDCACHEDIR,
    "TINY_MCE_MEDIA_ROOT": TINY_MCE_MEDIA_ROOT,
    "TINY_MCE_MEDIA_URL": TINY_MCE_MEDIA_URL,
    "TUNNEL_DATA": TUNNEL_DATA,
    "URL_ROOT": URL_ROOT
}

sep = "\r\t\t\t"           # ugh nasty - terminal output only
sep2 = "\r\t\t\t\t\t\t\t"  # ugh nasty - terminal output only

bycodes = sorted(pathsdict)
for p in bycodes:
    print p, sep, pathsdict[p]

byvals = sorted(pathsdict, key=pathsdict.__getitem__)
for p in byvals:
    print pathsdict[p], sep2, p

requirements.txt  (new file, 7 lines)
@@ -0,0 +1,7 @@
Django==1.7
django-extensions==2.2.9
django-registration==2.0
django-tinymce==2.0.1
six==1.14.0
Unidecode==1.1.1
Pillow==7.1.2
settings.py  (51 lines changed; Normal file → Executable file)
@@ -1,4 +1,5 @@
from localsettings import *  # initial localsettings call so that urljoins work
from localsettings import *
# initial localsettings call so that urljoins work
import os
import urlparse
import django
@@ -10,7 +11,7 @@ BASE_DIR = os.path.dirname(os.path.dirname(__file__))
DEBUG = True
TEMPLATE_DEBUG = DEBUG

ALLOWED_HOSTS = []
ALLOWED_HOSTS = [u'expo.survex.com']

ADMINS = (
    # ('Your Name', 'your_email@domain.com'),
@@ -44,15 +45,53 @@ NOTABLECAVESHREFS = [ "161", "204", "258", "76", "107", "264" ]
# trailing slash.
# Examples: "http://foo.com/media/", "/media/".
ADMIN_MEDIA_PREFIX = '/troggle/media-admin/'
PHOTOS_ROOT = os.path.join(EXPOWEB, 'photos')
CAVEDESCRIPTIONS = os.path.join(EXPOWEB, "noinfo", "cave_data")
ENTRANCEDESCRIPTIONS = os.path.join(EXPOWEB, "noinfo", "entrance_data")
#PHOTOS_ROOT = os.path.join(EXPOWEB, 'mugshot-data')
CAVEDESCRIPTIONS = os.path.join(EXPOWEB, "cave_data")
ENTRANCEDESCRIPTIONS = os.path.join(EXPOWEB, "entrance_data")

MEDIA_URL = urlparse.urljoin(URL_ROOT, '/site_media/')
SURVEYS_URL = urlparse.urljoin(URL_ROOT, '/survey_scans/')
PHOTOS_URL = urlparse.urljoin(URL_ROOT, '/photos/')
SVX_URL = urlparse.urljoin(URL_ROOT, '/survex/')

# top-level survex file basename (without .svx)
SURVEX_TOPNAME = "1623"

DEFAULT_LOGBOOK_PARSER = "Parseloghtmltxt"
DEFAULT_LOGBOOK_FILE = "logbook.html"

LOGBOOK_PARSER_SETTINGS = {
    "2019": ("2019/logbook.html", "Parseloghtmltxt"),
    "2018": ("2018/logbook.html", "Parseloghtmltxt"),
    "2017": ("2017/logbook.html", "Parseloghtmltxt"),
    "2016": ("2016/logbook.html", "Parseloghtmltxt"),
    "2015": ("2015/logbook.html", "Parseloghtmltxt"),
    "2014": ("2014/logbook.html", "Parseloghtmltxt"),
    "2013": ("2013/logbook.html", "Parseloghtmltxt"),
    "2012": ("2012/logbook.html", "Parseloghtmltxt"),
    "2011": ("2011/logbook.html", "Parseloghtmltxt"),
    "2010": ("2010/logbook.html", "Parseloghtmltxt"),
    "2009": ("2009/2009logbook.txt", "Parselogwikitxt"),
    "2008": ("2008/2008logbook.txt", "Parselogwikitxt"),
    "2007": ("2007/logbook.html", "Parseloghtmltxt"),
    "2006": ("2006/logbook/logbook_06.txt", "Parselogwikitxt"),
    "2005": ("2005/logbook.html", "Parseloghtmltxt"),
    "2004": ("2004/logbook.html", "Parseloghtmltxt"),
    "2003": ("2003/logbook.html", "Parseloghtml03"),
    "2002": ("2002/logbook.html", "Parseloghtmltxt"),
    "2001": ("2001/log.htm", "Parseloghtml01"),
    "2000": ("2000/log.htm", "Parseloghtml01"),
    "1999": ("1999/log.htm", "Parseloghtml01"),
    "1998": ("1998/log.htm", "Parseloghtml01"),
    "1997": ("1997/log.htm", "Parseloghtml01"),
    "1996": ("1996/log.htm", "Parseloghtml01"),
    "1995": ("1995/log.htm", "Parseloghtml01"),
    "1994": ("1994/log.htm", "Parseloghtml01"),
    "1993": ("1993/log.htm", "Parseloghtml01"),
    "1992": ("1992/log.htm", "Parseloghtml01"),
    "1991": ("1991/log.htm", "Parseloghtml01"),
}

APPEND_SLASH = False
SMART_APPEND_SLASH = True

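The per-year table pairs each logbook file with the parser that understands its format, with DEFAULT_LOGBOOK_FILE/DEFAULT_LOGBOOK_PARSER as the fallback. A minimal sketch of how a loader might consume it (the function is ours for illustration, not troggle's):

    import settings

    def logbook_parser_for(year):
        """Return (relative logbook path, parser class name) for an expedition year,
        falling back to the defaults for years not listed explicitly."""
        return settings.LOGBOOK_PARSER_SETTINGS.get(
            str(year),
            ("%s/%s" % (year, settings.DEFAULT_LOGBOOK_FILE), settings.DEFAULT_LOGBOOK_PARSER),
        )

    filename, parsername = logbook_parser_for(2006)   # -> ("2006/logbook/logbook_06.txt", "Parselogwikitxt")
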
@@ -92,7 +131,7 @@ INSTALLED_APPS = (
    'troggle.profiles',
    'troggle.core',
    'troggle.flatpages',
    'imagekit',
    #'troggle.imagekit',
)

MIDDLEWARE_CLASSES = (

@@ -1,12 +1,12 @@
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01//EN">
<!DOCTYPE html>
<html lang="en">
<head>
<meta http-equiv="Content-Type" content="text/html; charset=ISO-8859-1"/>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
<link rel="stylesheet" type="text/css" href="{{ settings.MEDIA_URL }}css/main3.css" title="eyeCandy"/>
<link rel="alternate stylesheet" type="text/css" href="{{ settings.MEDIA_URL }}css/mainplain.css" title="plain"/>
<link rel="stylesheet" type="text/css" href="{{ settings.MEDIA_URL }}css/dropdownNavStyle.css" />
<title>{% block title %}Troggle{% endblock %}</title>
<script src="{{ settings.JSLIB_URL }}jquery/jquery.min.js" type="text/javascript"></script>
<!-- <script src="{{ settings.JSLIB_URL }}jquery/jquery.min.js" type="text/javascript"></script> -->
<script src="{{ settings.MEDIA_URL }}js/jquery.quicksearch.js" type="text/javascript"></script>
<script src="{{ settings.MEDIA_URL }}js/base.js" type="text/javascript"></script>
<script src="{{ settings.MEDIA_URL }}js/jquery.dropdownPlain.js" type="text/javascript"></script>
@@ -16,7 +16,7 @@
<body onLoad="contentHeight();">

<div id="header">
<h1>CUCC Expeditions to Austria: 1976 - 2018</h1>
<h1>CUCC Expeditions to Austria: 1976 - 2020</h1>
<div id="editLinks"> {% block loginInfo %}
<a href="{{settings.EXPOWEB_URL}}">Website home</a> |
{% if user.username %}
@@ -35,14 +35,14 @@
<a href="{% url "survexcaveslist" %}">All Survex</a> |
<a href="{% url "surveyscansfolders" %}">Scans</a> |
<a href="{% url "tunneldata" %}">Tunneldata</a> |
<a href="{% url "survexcavessingle" 107 %}">107</a> |
<a href="{% url "survexcavessingle" 161 %}">161</a> |
<a href="{% url "survexcavessingle" 204 %}">204</a> |
<a href="{% url "survexcavessingle" 258 %}">258</a> |
<a href="{% url "survexcavessingle" 264 %}">264</a> |
<a href="{% url "expedition" 2016 %}">Expo2016</a> |
<a href="{% url "expedition" 2017 %}">Expo2017</a> |
<a href="{% url "survexcavessingle" "caves-1623/290/290.svx" %}">290</a> |
<a href="{% url "survexcavessingle" "caves-1623/291/291.svx" %}">291</a> |
<a href="{% url "survexcavessingle" "caves-1626/359/359.svx" %}">359</a> |
<a href="{% url "survexcavessingle" "caves-1623/258/258.svx" %}">258</a> |
<a href="{% url "survexcavessingle" "caves-1623/264/264.svx" %}">264</a> |
<a href="{% url "expedition" 2018 %}">Expo2018</a> |
<a href="{% url "expedition" 2019 %}">Expo2019</a> |
<a href="{% url "expedition" 2020 %}">Expo2020</a> |

<a href="/admin/">Django admin</a>
</div>
@@ -81,7 +81,7 @@

<li><a href="#">External links</a>
<ul class="sub_menu">
<li><a id="cuccLink" href="http://www.srcf.ucam.org/caving/wiki/Main_Page">CUCC website</a></li>
<li><a id="cuccLink" href="https://camcaving.uk">CUCC website</a></li>
<li><a id="expoWebsiteLink" href="http://expo.survex.com">Expedition website</a></li>
</ul>
</li>

@@ -408,8 +408,8 @@ div#scene {

</style>

<script type="text/javascript" src="/CaveView/js/CaveView.js" ></script>
<script type="text/javascript" src="/CaveView/lib/proj4.js" ></script>
<script type="text/javascript" src="/javascript/CaveView/js/CaveView.js" ></script>
<script type="text/javascript" src="/javascript/CaveView/lib/proj4.js" ></script>


<script type="text/javascript" >

@@ -1,4 +1,4 @@
<html>
<html lang="en">
<head>

<meta http-equiv="Content-Type" content="text/html; charset=iso-8859-1" />

@@ -11,30 +11,34 @@
<h3>Notable caves</h3>
<ul>
{% for cave in notablecaves %}
<li> <a href="{{ cave.url }}">{% if cave.kataster_number %}{{ cave.kataster_number }}{% else %}{{cave.unofficial_number }}{%endif %} {{cave.official_name|safe}}</a> </li>
<li> <a href="{{ cave.url }}">{% if cave.kataster_number %}{{ cave.kataster_number }}{% else %}{{cave.unofficial_number }}{% endif %} {{cave.official_name|safe}}</a> </li>
{% endfor %}
</ul>

<h3>1623</h3>

<table class="searchable">
{% for cave in caves1623 %}

<tr><td> <a href="{{ cave.url }}">{% if cave.kataster_number %}{{ cave.kataster_number }}{% else %}{{cave.unofficial_number }}{%endif %} {{cave.official_name|safe}}</a> </td></tr>

{% endfor %}
</table>

<h3>1626</h3>

<ul class="searchable">
{% for cave in caves1626 %}
{% for cave in caves1626 %}

<li> <a href="{{ cave.url }}">{% if cave.kataster_number %}{{ cave.kataster_number }} {{cave.official_name|safe}}</a> {% if cave.unofficial_number %}({{cave.unofficial_number }}){% endif %}{% else %}{{cave.unofficial_number }} {{cave.official_name|safe}}</a> {% endif %}
</li>

{% endfor %}
</ul>
<p style="text-align:right">
<a href="{% url "newcave" %}">New Cave</a>
</p>
<h3>1623</h3>

<li> <a href="{{ cave.url }}">{% if cave.kataster_number %}{{ cave.kataster_number }}{% else %}{{cave.unofficial_number }}{%endif %} {{cave.official_name|safe}}</a> </li>
<table class="searchable">
{% for cave in caves1623 %}

<tr><td> <a href="{{ cave.url }}">{% if cave.kataster_number %}{{ cave.kataster_number }} {{cave.official_name|safe}}</a> {% if cave.unofficial_number %}({{cave.unofficial_number }}){% endif %}{% else %}{{cave.unofficial_number }} {{cave.official_name|safe}}</a> {% endif %}</td></tr>

{% endfor %}
</ul>
</table>

<p style="text-align:right">
<a href="{% url "newcave" %}">New Cave</a>

</p>
{% endblock %}

@@ -16,32 +16,52 @@

{% if error %}
<div class="noticeBox">
{{ error }}
{{ error }}
<a href="#" class="closeDiv">dismiss this message</a>
</div>
{% endif %}


<form name="reset" method="post" action="">
<h3>Wipe:</h3>

<table>
<tr><td>Wipe entire database and recreate tables: </td><td><input type="checkbox" name="reload_db" /></td><td> <input type="submit" id="Import" value="I really want to delete all information in troggle, and accept all responsibility."></td></tr>
</table>
<h3>Wipe:</h3>
<table>
<tr>
<td>Wipe entire database and recreate tables: </td>
<td><input type="checkbox" name="reload_db" /></td>
<td>
<input type="submit" id="Import" value="I really want to delete all information in troggle, and accept all responsibility.">
</td>
</tr>
</table>
</form>
<h3>Import (non-destructive):</h3>
<form name="import" method="post" action="">
<table>
<tr><td>people from folk.csv using parsers\people.py</td><td><input type="checkbox" name="import_people"/></td></tr>
<tr><td>caves from cavetab2.csv using parsers\cavetab.py</td><td> <input type="checkbox" class="parser" name="import_cavetab"/></td></tr>
<tr><td>logbook entries using parsers\logbooks.py</td><td><input type="checkbox" name="import_logbooks"/></td></tr>
<tr><td>QMs using parsers\QMs.py</td><td><input type="checkbox" name="import_QMs" /></td></tr>
<tr><td>survey scans using parsers\surveys.py</td><td><input type="checkbox" name="import_surveys" /></td></tr>
<tr><td>survex data using parsers\survex.py</td><td><input type="checkbox" name="import_survex" /></td></tr>

</table>


<table>
<tr>
<td>people from folk.csv using parsers\people.py</td>
<td><input type="checkbox" name="import_people"/></td>
</tr>
<tr>
<td>caves from cavetab2.csv using parsers\cavetab.py</td>
<td> <input type="checkbox" class="parser" name="import_cavetab"/></td>
</tr>
<tr>
<td>logbook entries using parsers\logbooks.py</td>
<td><input type="checkbox" name="import_logbooks"/></td>
</tr>
<tr>
<td>QMs using parsers\QMs.py</td>
<td><input type="checkbox" name="import_QMs" /></td>
</tr>
<tr>
<td>survey scans using parsers\surveys.py</td>
<td><input type="checkbox" name="import_surveys" /></td>
</tr>
<tr>
<td>survex data using parsers\survex.py</td>
<td><input type="checkbox" name="import_survex" /></td>
</tr>
</table>
<p>
<input type="submit" id="Import" value="Import">

@@ -76,61 +96,44 @@
</tr>

<tr>
<td>
surveys to Surveys.csv
<td>
surveys to Surveys.csv
</td>
<td>
<td>

</td>
<td>
<form name="export" method="get" action={% url "downloadlogbook" %}>
<p>Download a logbook file which is dynamically generated by Troggle.</p>

<p>Download a logbook file which is dynamically generated by Troggle.</p>

<p>
Expedition year:
<select name="year">
{% for expedition in expeditions %}
<option value="{{expedition}}"> {{expedition}} </option>
<option value="{{expedition}}"> {{expedition}} </option>
{% endfor %}
</select>
</p>

<p>
Output style:
<select name="extension">
<option value="txt">.txt file with MediaWiki markup - 2008 style</option>
<option value="html">.html file - 2005 style</option>
<select name="extension">
<option value="txt">.txt file with MediaWiki markup - 2008 style</option>
<option value="html">.html file - 2005 style</option>
</select>
</p>
<p>
<input name="download_logbook" type="submit" value="Download logbook" />
</p>
</form>
</td>
</td>
</tr>

<tr>
<td>
surveys to Surveys.csv
</td>
<td>
<form name="export" method="post" action="">
<p>Overwrite the existing Surveys.csv file with one generated by Troggle.</p>
<input disabled name="export_surveys" type="submit" value="Update {{settings.SURVEYS}}noinfo/Surveys.csv" />
</form>
</td>
<td>
<form name="export" method="get" action={% url "downloadsurveys" %}>
<p>Download a Surveys.csv file which is dynamically generated by Troggle.</p>
<input disabled name="download_surveys" type="submit" value="Download Surveys.csv" />
</form>
</td>
</tr>


<tr>
<td>qms to qms.csv</td><td>
<form name="export_qms" method="get" action="downloadqms">

<form name="export_qms" method="get" action="downloadqms">

<!--This is for choosing caves by area (drilldown).

<select id="qmcaveareachooser" class="searchable" >
@@ -138,12 +141,12 @@

-->

Choose a cave.
Choose a cave.
<select name="cave_id" id="qmcavechooser">

{% for cave in caves %}
<option value="{{cave.kataster_number}}">{{cave}}
</option>
</option>
{% endfor %}

</select>
@@ -154,4 +157,4 @@
</table>
</form>

{% endblock %}
{% endblock %}
@@ -1,4 +1,3 @@
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01//EN">
<!-- Only put one cave in this file -->
<!-- If you edit this file, make sure you update the websites database -->
<html lang="en">

@@ -1,4 +1,3 @@
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01//EN">
<!-- Only put one entrance in this file -->
<!-- If you edit this file, make sure you update the websites database -->
<html lang="en">

@@ -1,4 +1,5 @@
{% autoescape off %}
<!DOCTYPE html>
<html>
<head>
<style type="text/css">.author {text-decoration:underline}</style>

@@ -9,6 +9,7 @@
<script src="{{ settings.TINY_MCE_MEDIA_URL }}tiny_mce.js" type="text/javascript"></script>
{% endblock %}
{% block content %}
<h1>Edit Cave</h1>
<form action="" method="post">{% csrf_token %}
<table>{{ form }}{{caveAndEntranceFormSet}}</table>
{{ versionControlForm }}

@@ -3,6 +3,11 @@
{% block extrahead %}
{% load csrffaker %}
<script src="{{ settings.TINY_MCE_MEDIA_URL }}tiny_mce.js" type="text/javascript"></script>
<script type="text/javascript">
tinyMCE.init({
mode : "textareas"
});
</script>
{% endblock %}
{% block body %}
<h1>Edit {{ path }}</h1>

@@ -10,10 +10,6 @@

{% block content %}

{% if message %}
<p>debug message: {{message}}</p>
{% endif %}

<h2>{{expedition.name}}</h2>

<p><b>Other years:</b>
@@ -41,13 +37,13 @@ an "S" for a survey trip. The colours are the same for people on the same trip.
</tr>
{% for personexpeditionday in personexpeditiondays %}
<tr>
<td><a href="{{ personexpeditionday.personexpedition.get_absolute_url }}">{{personexpeditionday.personexpedition.person}}</a></td>
<td><a href="{{ personexpeditionday.personexpedition.get_absolute_url }}">{{personexpeditionday.personexpedition.person|safe}}</a></td>
{% for persondayactivities in personexpeditionday.personrow %}

{% if persondayactivities.persontrips or persondayactivities.survexblocks %}
<td class="persondayactivity">
{% for persontrip in persondayactivities.persontrips %}
<a href="{{persontrip.logbook_entry.get_absolute_url}}" class="dayindexlog-{{persontrip.logbook_entry.DayIndex}}">T</a>
<a href="{{persontrip.logbook_entry.get_absolute_url}}" class="dayindexlog-1">T</a>
{% endfor %}
<br/>
{% for survexblock in persondayactivities.survexblocks %}
@@ -73,7 +69,7 @@ an "S" for a survey trip. The colours are the same for people on the same trip.
{% regroup dateditems|dictsort:"date" by date as dates %}
{% for date in dates %}
<tr>
<td>{{date.grouper}}</td>
<td>{{date.grouper|date:"D d M Y"}}</td>
<td>{% for item in date.list %}
{% if item.isLogbookEntry %}<a href="{{ item.get_absolute_url }}">{{item.title|safe}}</a><br/>{% endif %}
{% endfor %}</td>

templates/experimental.html  (4 lines changed; Normal file → Executable file)
@@ -8,7 +8,9 @@

<h1>Expo Experimental</h1>

<p>Number of survey legs: {{nsurvexlegs}}, total length: {{totalsurvexlength}}</p>
<p>Number of survey legs: {{nsurvexlegs}}<br />
Total length: {{totalsurvexlength}} m on importing survex files.<br />
Total length: {{addupsurvexlength}} m adding up all the years below.</p>

<table>
<tr><th>Year</th><th>Surveys</th><th>Survey Legs</th><th>Total length</th></tr>

@@ -1,11 +1,10 @@
<!DOCTYPE html>
<html>
<head>

<meta http-equiv="Content-Type" content="text/html; charset=iso-8859-1" />
<meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
<title>{% block title %}{% endblock %}
</title>
<link rel="stylesheet" type="text/css" href="../css/main2.css" />

</head>
<body>
<div id="mainmenu">
@@ -13,17 +12,19 @@
<li><a href="/index.htm">Expo website home</a></li>
<li><a href="/intro.html">Introduction</a></li>
<li><a href="/infodx.htm">Main index</a></li>
<li><a href="/indxal.htm">Cave index</a></li>
<li><a href="/caves">Cave index</a></li>
{% if cavepage %}
<ul>
<li><a href="{% url "survexcaveslist" %}">All Survex</a></li>
<li><a href="{% url "surveyscansfolders" %}">Scans</a></li>
<li><a href="{% url "tunneldata" %}">Tunneldata</a></li>
<li><a href="{% url "survexcavessingle" 161 %}">161</a></li>
<li><a href="{% url "survexcavessingle" 204 %}">204</a></li>
<li><a href="{% url "survexcavessingle" 258 %}">258</a></li>
<li><a href="{% url "expedition" 2012 %}">Expo2012</a></li>
<li><a href="{% url "expedition" 2013 %}">Expo2013</a></li>
<li><a href="{% url "survexcavessingle" "caves-1623/290/290.svx" %}">290</a></li>
<li><a href="{% url "survexcavessingle" "caves-1623/291/291.svx" %}">291</a></li>
<li><a href="{% url "survexcavessingle" "caves-1626/359/359.svx" %}">359</a></li>
<li><a href="{% url "survexcavessingle" "caves-1623/258/258.svx" %}">258</a></li>
<li><a href="{% url "survexcavessingle" "caves-1623/264/264.svx" %}">264</a></li>
<li><a href="{% url "expedition" 2018 %}">Expo2018</a></li>
<li><a href="{% url "expedition" 2019 %}">Expo2019</a></li>
<li><a href="/admin">Django admin</a></li>
</ul>
{% endif %}

@@ -16,7 +16,7 @@
{% if entry.is_deletion %}
{{ entry.object_repr }}
{% else %}
<a href="admin/{{ entry.get_admin_url }}">{{ entry.object_repr }}</a>
<a href="admin/{{ entry.get_admin_url }}/">{{ entry.object_repr }}</a>
{% endif %}
<br/>
{% if entry.content_type %}
@@ -38,7 +38,7 @@
<div id="col1">
<h3>Welcome</h3>
<p class="indent">
This is Troggle, the information portal for Cambridge University Caving Club's Expeditions to Austria.
This is Troggle, the online system for Cambridge University Caving Club's Expeditions to Austria.
</p>

<p class="indent">
@@ -46,20 +46,9 @@ Here you will find information about the {{expedition.objects.count}} expedition
</p>

<p class="indent">
If you are an expedition member, please sign up using the link to the top right and begin editing.
If you are an expedition member, please sign up using the link to the top right.
</p>

<h3>News</h3>

<p class="indent">
Everyone is gearing up for the 2009 expedition; please see the link below for the main expedition website.
</p>

<h3>Troggle development</h3>
<p class="indent">
Troggle is still under development. Check out the <a href="http://troggle.googlecode.com">development page</a> on google code, where you can file bug reports, make suggestions, and help develop the code. There is also an old todo list <a href="{%url "todo"%}">here</a>.
</p>
</div>
{% endblock content %}

{% block margins %}

@@ -2,12 +2,12 @@
{% load wiki_markup %}

{% block title %}Logbook {{logbookentry.id}}{% endblock %}
{% block editLink %}<a href={{logbookentry.get_admin_url}}>Edit logbook entry {{logbookentry|wiki_to_html_short}}</a>{% endblock %}
{% block editLink %}<a href={{logbookentry.get_admin_url}}/>Edit logbook entry {{logbookentry|wiki_to_html_short}}</a>{% endblock %}
{% block content %}

{% block related %}{% endblock %}
{% block nav %}{% endblock %}
<h2>{{logbookentry.title}}</h2>
<h2>{{logbookentry.title|safe}}</h2>

<div id="related">
<p><a href="{{ logbookentry.expedition.get_absolute_url }}">{{logbookentry.expedition.name}}</a></p>
@@ -20,10 +20,10 @@

<p>
{% if logbookentry.get_previous_by_date %}
<a href="{{ logbookentry.get_previous_by_date.get_absolute_url }}">{{logbookentry.get_previous_by_date.date}}</a>
<a href="{{ logbookentry.get_previous_by_date.get_absolute_url }}">{{logbookentry.get_previous_by_date.date|date:"D d M Y"}}</a>
{% endif %}
{% if logbookentry.get_next_by_date %}
<a href="{{ logbookentry.get_next_by_date.get_absolute_url }}">{{logbookentry.get_next_by_date.date}}</a>
<a href="{{ logbookentry.get_next_by_date.get_absolute_url }}">{{logbookentry.get_next_by_date.date|date:"D d M Y"}}</a>
{% endif %}
</p>

@@ -47,12 +47,12 @@

<td>
{% if persontrip.persontrip_prev %}
<a href="{{ persontrip.persontrip_prev.logbook_entry.get_absolute_url }}">{{persontrip.persontrip_prev.logbook_entry.date}}</a>
<a href="{{ persontrip.persontrip_prev.logbook_entry.get_absolute_url }}">{{persontrip.persontrip_prev.logbook_entry.date|date:"D d M Y"}}</a>
{% endif %}
</td>
<td>
{% if persontrip.persontrip_next %}
<a href="{{ persontrip.persontrip_next.logbook_entry.get_absolute_url }}">{{persontrip.persontrip_next.logbook_entry.date}}</a>
<a href="{{ persontrip.persontrip_next.logbook_entry.get_absolute_url }}">{{persontrip.persontrip_next.logbook_entry.date|date:"D d M Y"}}</a>
{% endif %}
</td>

@@ -65,9 +65,14 @@
</div>

<div id="col1">
<div class="logbookentry">
<b>{{logbookentry.date}}</b>
{{logbookentry.text|wiki_to_html}}</div>
<div class="logbookentry">
<b>{{logbookentry.date|date:"D d M Y"}}</b>
{% if logbookentry.entry_type == "html" %}
<p>{{logbookentry.text|safe}}</p>
{% else %}
{{logbookentry.text|wiki_to_html}}
{% endif %}
</div>
</div>
</div>


@@ -2,11 +2,14 @@
<ul id="links">
<li><a href="/index.htm">Home</a></li>
<li><a href="/infodx.htm">Main Index</a></li>
<li><a href="/troggle">Troggle</a></li>
<li><a href="/areas.htm">Areas</a></li>
<li><a href="/indxal.htm">Caves</a></li>
<li><a href="/handbook/index.htm">Handbook</a></li>
<li><a href="/pubs.htm">Reports</a></li>
<li><a href="/areas.htm">Areas</a></li>
<li><a href="/caves">Caves</a></li>
<li><a href="/expedition/2019">Troggle</a></li>
<li><form name=P method=get action="/search" target="_top">
<input id="omega-autofocus" type=search name=P value="testing" size=8 autofocus>
<input type=submit value="Search"></li>
{% if editable %}<li><a href="{% url "editflatpage" path %}" class="editlink"><strong>Edit this page</strong></a></li>{% endif %}
{% if cave_editable %}<li><a href="{% url "edit_cave" cave_editable %}" class="editlink"><strong>Edit this cave</strong></a></li>{% endif %}
</ul>

templates/pathsreport.html  (new file, 45 lines)
@@ -0,0 +1,45 @@
{% extends "base.html" %}
{% load wiki_markup %}
{% load link %}

{% block title %}Troggle paths report{% endblock %}

{% block content %}

<h1>Expo Troggle paths report</h1>


<p>

<table style="font-family: Consolas, Lucida Console, monospace;">
<tr><th>Code</th><th>Path</th></tr>
{% for c,p in bycodeslist %}
<tr>
<td>
{{c}}
</td>
<td>
{{p}}
</td>
</tr>
{% endfor %}
</table>

<p>
<table style="font-family: Consolas, Lucida Console, monospace;">
<tr><th>Path</th><th>Code</th></tr>
{% for c,p in bypathslist %}
<tr>
<td>
{{p}}
</td>
<td>
{{c}}
</td>
</tr>
{% endfor %}
</table>
<p>
There are {{ ncodes }} different path codes defined.
{% endblock %}

@@ -7,7 +7,7 @@

{% block content %}
<h1>
<a href="{{personexpedition.person.get_absolute_url}}">{{personexpedition.person}}</a> :
<a href="{{personexpedition.person.get_absolute_url}}">{{personexpedition.person|safe}}</a> :
<a href="{{personexpedition.expedition.get_absolute_url}}">{{personexpedition.expedition}}</a>
</h1>

@@ -8,15 +8,16 @@
<h2>Notable expoers</h2>
<table class="searchable">
<tr><th>Person</th><th>First</th><th>Last</th><th>Notability</th></tr>
{% for person in notablepersons %}
{% for person in notablepersons|dictsortreversed:"notability" %}
<tr>
<td><a href="{{ person.get_absolute_url }}">{{person|wiki_to_html_short}}</a></td>
<td><a href="{{ person.first.get_absolute_url }}">{{ person.first.expedition.year }}</a></td>
<td><a href="{{ person.last.get_absolute_url }}">{{ person.last.expedition.year }}</a></td>
<td>{{person.notability}}</td>
<td>{{person.notability|floatformat:2}}</td>
</tr>
{% endfor %}
</table>
<p>This is based purely on attendance, not on activities, surveying or usefulness of any kind. But as Woody Allen said: "90% of success is just turning up". It should really be called "Notably recent expoers", since the metric is a simple recency weighting: 1/2 for attending last year, 1/3 for the year before, and so on, all added up. The display cutoff is 1/3.

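Read literally, that gives each attended year a weight of 1/(years ago + 1), summed per person, with anyone under 1/3 hidden. A small sketch of that reading (our interpretation of the paragraph, not necessarily troggle's exact implementation):

    def notability(years_attended, current_year):
        """Sum of 1/(n+1) over attended years, where n is how many years ago."""
        return sum(1.0 / (current_year - y + 1) for y in years_attended)

    # e.g. someone who came in 2018 and 2019, viewed from 2020:
    print(notability([2018, 2019], 2020))   # 1/3 + 1/2 = 0.8333...
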
<h2>All expoers</h2>
@@ -31,8 +32,8 @@
<tr>
<td><a href="{{ person.get_absolute_url }}">{{person|wiki_to_html_short}}</a></td>
<td><a href="{{ person.first.get_absolute_url }}">{{person.first.expedition.year}}</a></td>
<td><a href="{{ person.last.get_absolute_url }}">{{person.last.expedition.year}}</a></td>
<td>{{ person.surveyedleglength }}</td>
<td><a href="{{ person.last.get_absolute_url }}">{{person.last.expedition.year}}</a></td>
<td></td>
</tr>
{% endfor %}
</table>

@@ -2,11 +2,11 @@
{% load wiki_markup %}
{% load survex_markup %}

{% block title %}Survex Scans Folder{% endblock %}
{% block title %}Survey Scans Folder{% endblock %}

{% block content %}

<h3>Survex Scans in: {{survexscansfolder.walletname}}</h3>
<h3>Survey Scans in: {{survexscansfolder.walletname}}</h3>
<table>
{% for survexscansingle in survexscansfolder.survexscansingle_set.all %}
<tr>
@@ -20,7 +20,7 @@
{% endfor %}
</table>

<h3>Surveys referring to this wallet</h3>
<h3>Survex surveys referring to this wallet</h3>

<table>
{% for survexblock in survexscansfolder.survexblock_set.all %}

@@ -2,11 +2,15 @@
{% load wiki_markup %}
{% load survex_markup %}

{% block title %}All Survex scans folders{% endblock %}
{% block title %}All Survey scans folders (wallets){% endblock %}

{% block content %}

<h3>All Survex scans folders</h3>
<h3>All Survey scans folders (wallets)</h3>
<p>Each wallet contains the scanned original in-cave survey notes and sketches of
plans and elevations. It also contains scans of centre-line survex output on which
hand-drawn passage sections are drawn. These hand-drawn passages will eventually be
traced to produce Tunnel or Therion drawings, and finally the complete cave survey.
<table>
<tr><th>Scans folder</th><th>Files</th><th>Survex blocks</th></tr>
{% for survexscansfolder in survexscansfolders %}

@@ -41,7 +41,7 @@
<td>{{survexblock.name}}</td>
<td>
{% if survexblock.expedition %}
<a href="{{survexblock.expedition.get_absolute_url}}">{{survexblock.date}}</a>
<a href="{{survexblock.expedition.get_absolute_url}}">{{survexblock.date|date:"D d M Y"}}</a>
{% else %}
{{survexblock.date}}
{% endif %}

@@ -46,7 +46,7 @@ $(document).ready(function()
</p>
{% endif %}

<form id="codewikiform" action="" method="POST">
<form id="codewikiform" action="" method="POST">{% csrf_token %}
<div class="codeframebit">{{form.code}}</div>
<div style="display:none">{{form.filename}} {{form.dirname}} {{form.datetime}} {{form.outputtype}}</div>
<input type="submit" name="diff" value="Diffy" />

@@ -6,14 +6,13 @@

{% block content %}

<h3>All Tunnel files</h3>
<h3>All Tunnel files - references to wallets and survey scans</h3>
<table>
<tr><th>File</th><th>Font</th><th>SurvexBlocks</th><th>Size</th><th>Paths</th><th>Scans folder</th><th>Scan files</th><th>Frames</th></tr>
<tr><th>File</th><th>Font</th><th>Size</th><th>Paths</th><th>Scans folder</th><th>Scan files</th><th>Frames</th></tr>
{% for tunnelfile in tunnelfiles %}
<tr>
<td><a href="{% url "tunnelfile" tunnelfile.tunnelpath %}">{{tunnelfile.tunnelpath}}</a></td>
<td>{{tunnelfile.bfontcolours}}</td>
<td></td>
<td>{{tunnelfile.filesize}}</td>
<td>{{tunnelfile.npaths}}</td>

urls.py  (46 lines changed; Normal file → Executable file)
@@ -15,32 +15,28 @@ admin.autodiscover()

# type url probably means it's used.

# HOW DOES THIS WORK:
# url( <regular expression that matches the thing in the web browser>,
# HOW DOES THIS WORK:
# url( <regular expression that matches the thing in the web browser>,
#      <reference to python function in 'core' folder>,
#      <name optional argument for URL reversing (doesn't do much)>)
#      <name optional argument for URL reversing (doesn't do much)>)

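The comment undersells name=: besides reverse() in Python code, it is what the {% url %} tags in the templates above resolve against. A quick sketch using a pattern from this file (Django 1.7 idiom; the resulting path assumes the urlconf is mounted at the site root):

    from django.core.urlresolvers import reverse

    # url(r'^expedition/(\d+)$', views_logbooks.expedition, name="expedition")
    reverse("expedition", args=[2019])        # -> "/expedition/2019"
    # and in a template: {% url "expedition" 2019 %}
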
actualurlpatterns = patterns('',

    url(r'^testingurl/?$', views_caves.millenialcaves, name="testing"),

    url(r'^millenialcaves/?$', views_caves.millenialcaves, name="millenialcaves"),

    url(r'^troggle$', views_other.frontpage, name="frontpage"),
    url(r'^troggle$', views_other.frontpage, name="frontpage"),
    url(r'^todo/$', views_other.todo, name="todo"),

    url(r'^caves/?$', views_caves.caveindex, name="caveindex"),
    url(r'^caves$', views_caves.caveindex, name="caveindex"),
    url(r'^people/?$', views_logbooks.personindex, name="personindex"),

    url(r'^newqmnumber/?$', views_other.ajax_QM_number, ),
    url(r'^lbo_suggestions/?$', logbook_entry_suggestions),
    #(r'^person/(?P<person_id>\d*)/?$', views_logbooks.person),
    url(r'^person/(?P<first_name>[A-Z]*[a-z\-\']*)[^a-zA-Z]*(?P<last_name>[a-z\-\']*[^a-zA-Z]*[A-Z]*[a-z\-]*)/?', views_logbooks.person, name="person"),
    url(r'^person/(?P<first_name>[A-Z]*[a-z\-\'&;]*)[^a-zA-Z]*(?P<last_name>[a-z\-\']*[^a-zA-Z]*[A-Z]*[a-z\-&;]*)/?', views_logbooks.person, name="person"),
    #url(r'^person/(\w+_\w+)$', views_logbooks.person, name="person"),

    url(r'^expedition/(\d+)$', views_logbooks.expedition, name="expedition"),
    url(r'^expeditions/?$', views_logbooks.ExpeditionListView.as_view(), name="expeditions"),
    url(r'^personexpedition/(?P<first_name>[A-Z]*[a-z]*)[^a-zA-Z]*(?P<last_name>[A-Z]*[a-z]*)/(?P<year>\d+)/?$', views_logbooks.personexpedition, name="personexpedition"),
    url(r'^personexpedition/(?P<first_name>[A-Z]*[a-z&;]*)[^a-zA-Z]*(?P<last_name>[A-Z]*[a-zA-Z&;]*)/(?P<year>\d+)/?$', views_logbooks.personexpedition, name="personexpedition"),
    url(r'^logbookentry/(?P<date>.*)/(?P<slug>.*)/?$', views_logbooks.logbookentry, name="logbookentry"),
    url(r'^newlogbookentry/(?P<expeditionyear>.*)$', views_logbooks.newLogbookEntry, name="newLogBookEntry"),
    url(r'^editlogbookentry/(?P<expeditionyear>[^/]*)/(?P<pdate>[^/]*)/(?P<pslug>[^/]*)/$', views_logbooks.newLogbookEntry, name="editLogBookEntry"),
@@ -48,17 +44,17 @@ actualurlpatterns = patterns('',
    url(r'^newfile', views_other.newFile, name="newFile"),

    url(r'^getEntrances/(?P<caveslug>.*)', views_caves.get_entrances, name="get_entrances"),
    url(r'^getQMs/(?P<caveslug>.*)', views_caves.get_qms, name="get_qms"),
    url(r'^getQMs/(?P<caveslug>.*)', views_caves.get_qms, name="get_qms"),  # no template "get_qms"?
    url(r'^getPeople/(?P<expeditionslug>.*)', views_logbooks.get_people, name="get_people"),
    url(r'^getLogBookEntries/(?P<expeditionslug>.*)', views_logbooks.get_logbook_entries, name="get_logbook_entries"),


    url(r'^cave/new/$', edit_cave, name="newcave"),
    url(r'^cave/new/$', views_caves.edit_cave, name="newcave"),
    url(r'^cave/(?P<cave_id>[^/]+)/?$', views_caves.cave, name="cave"),
    url(r'^caveslug/([^/]+)/?$', views_caves.caveSlug, name="caveSlug"),
    url(r'^cave/entrance/([^/]+)/?$', views_caves.caveEntrance),
    url(r'^cave/description/([^/]+)/?$', views_caves.caveDescription),
    url(r'^cave/qms/([^/]+)/?$', views_caves.caveQMs),
    url(r'^cave/qms/([^/]+)/?$', views_caves.caveQMs),  # blank page
    url(r'^cave/logbook/([^/]+)/?$', views_caves.caveLogbook),
    url(r'^entrance/(?P<caveslug>[^/]+)/(?P<slug>[^/]+)/edit/', views_caves.editEntrance, name="editentrance"),
    url(r'^entrance/new/(?P<caveslug>[^/]+)/', views_caves.editEntrance, name="newentrance"),
@@ -71,7 +67,7 @@ actualurlpatterns = patterns('',
    # url(r'^jgtuploadfile$', view_surveys.jgtuploadfile, name="jgtuploadfile"),

    url(r'^cave/(?P<cave_id>[^/]+)/?(?P<ent_letter>[^/])$', ent),
    url(r'^cave/(?P<slug>[^/]+)/edit/$', edit_cave, name="edit_cave"),
    url(r'^cave/(?P<slug>[^/]+)/edit/$', views_caves.edit_cave, name="edit_cave"),
    #(r'^cavesearch', caveSearch),


@@ -87,9 +83,8 @@ actualurlpatterns = patterns('',
    url(r'^survey/?$', surveyindex, name="survey"),
    url(r'^survey/(?P<year>\d\d\d\d)\#(?P<wallet_number>\d*)$', survey, name="survey"),

    # Is all this lot out of date? Maybe the logbooks work?
    url(r'^controlpanel/?$', views_other.controlPanel, name="controlpanel"),
    url(r'^CAVETAB2\.CSV/?$', views_other.downloadCavetab, name="downloadcavetab"),
    url(r'^Surveys\.csv/?$', views_other.downloadSurveys, name="downloadsurveys"),
    url(r'^logbook(?P<year>\d\d\d\d)\.(?P<extension>.*)/?$', views_other.downloadLogbook),
    url(r'^logbook/?$', views_other.downloadLogbook, name="downloadlogbook"),
    url(r'^cave/(?P<cave_id>[^/]+)/qm\.csv/?$', views_other.downloadQMs, name="downloadqms"),
@@ -111,6 +106,10 @@ actualurlpatterns = patterns('',

    # (r'^personform/(.*)$', personForm),

    (r'^expofiles/(?P<path>.*)$', 'django.views.static.serve',
        {'document_root': settings.EXPOFILES, 'show_indexes': True}),
    (r'^static/(?P<path>.*)$', 'django.views.static.serve',
        {'document_root': settings.STATIC_ROOT, 'show_indexes': True}),
    (r'^site_media/(?P<path>.*)$', 'django.views.static.serve',
        {'document_root': settings.MEDIA_ROOT, 'show_indexes': True}),
    (r'^tinymce_media/(?P<path>.*)$', 'django.views.static.serve',
@@ -124,9 +123,9 @@ actualurlpatterns = patterns('',
    url(r'^survexfile/(?P<survex_file>.*?)\.err$', views_survex.err),


    url(r'^survexfile/caves/$', views_survex.survexcaveslist, name="survexcaveslist"),
    url(r'^survexfile/caves/(?P<survex_cave>.*)$', views_survex.survexcavesingle, name="survexcavessingle"),
    url(r'^survexfileraw/(?P<survex_file>.*?)\.svx$', views_survex.svxraw, name="svxraw"),
    url(r'^survexfile/caves/$', views_survex.survexcaveslist, name="survexcaveslist"),
    url(r'^survexfile/(?P<survex_cave>.*)$', views_survex.survexcavesingle, name="survexcavessingle"),
    url(r'^survexfileraw/(?P<survex_file>.*?)\.svx$', views_survex.svxraw, name="svxraw"),


    (r'^survey_files/listdir/(?P<path>.*)$', view_surveys.listdir),
@@ -138,7 +137,7 @@ actualurlpatterns = patterns('',
    #(r'^survey_scans/(?P<path>.*)$', 'django.views.static.serve', {'document_root': settings.SURVEY_SCANS, 'show_indexes':True}),
    url(r'^survey_scans/$', view_surveys.surveyscansfolders, name="surveyscansfolders"),
    url(r'^survey_scans/(?P<path>[^/]+)/$', view_surveys.surveyscansfolder, name="surveyscansfolder"),
    url(r'^survey_scans/(?P<path>[^/]+)/(?P<file>[^/]+(?:png|jpg|jpeg))$',
    url(r'^survey_scans/(?P<path>[^/]+)/(?P<file>[^/]+(?:png|jpg|jpeg|pdf|PNG|JPG|JPEG|PDF))$',
        view_surveys.surveyscansingle, name="surveyscansingle"),

    url(r'^tunneldata/$', view_surveys.tunneldata, name="tunneldata"),
@@ -147,8 +146,8 @@ actualurlpatterns = patterns('',

    #url(r'^tunneldatainfo/(?P<path>.+?\.xml)$', view_surveys.tunnelfileinfo, name="tunnelfileinfo"),

    (r'^photos/(?P<path>.*)$', 'django.views.static.serve',
        {'document_root': settings.PHOTOS_ROOT, 'show_indexes':True}),
    #(r'^photos/(?P<path>.*)$', 'django.views.static.serve',
    #    {'document_root': settings.PHOTOS_ROOT, 'show_indexes':True}),
|
||||
|
||||
url(r'^prospecting/(?P<name>[^.]+).png$', prospecting_image, name="prospecting_image"),
|
||||
|
||||
@@ -157,6 +156,7 @@ actualurlpatterns = patterns('',
|
||||
|
||||
# for those silly ideas
|
||||
url(r'^experimental.*$', views_logbooks.experimental, name="experimental"),
|
||||
url(r'^pathsreport.*$', views_logbooks.pathsreport, name="pathsreport"),
|
||||
|
||||
#url(r'^trip_report/?$',views_other.tripreport,name="trip_report")
|
||||
|
||||
|
||||
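For context, these routes are Django 1.x-style entries: `url()` calls with raw regexes, collected by `patterns('', ...)` (a wrapper deprecated in Django 1.8 and removed in 1.10). A named group such as `(?P<cave_id>[^/]+)` is passed to the matched view as a keyword argument, and a trailing `/?$` makes the final slash optional. A minimal, self-contained sketch of that dispatch behaviour, assuming a stand-in `cave` view rather than troggle's real one:

```python
# Sketch only: how one of the regex routes above hands its named
# group to a view. The `cave` view here is a stand-in, not troggle's.
from django.conf.urls import url
from django.http import HttpResponse

def cave(request, cave_id):
    # (?P<cave_id>[^/]+) captures up to the next slash and arrives
    # as the keyword argument cave_id.
    return HttpResponse("cave id: %s" % cave_id)

urlpatterns = [
    # /?$ makes the trailing slash optional, matching both
    # /cave/1623-161 and /cave/1623-161/
    url(r'^cave/(?P<cave_id>[^/]+)/?$', cave, name="cave"),
]
```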
utils.py (16 changed lines)
@@ -1,4 +1,5 @@
from django.conf import settings
+from django.shortcuts import render
import random, re, logging
from troggle.core.models import CaveDescription

@@ -58,21 +59,6 @@ def save_carefully(objectType, lookupAttribs={}, nonLookupAttribs={}):
    if not created and not instance.new_since_parsing:
        logging.info(str(instance) + " existed in the database unchanged since last parse. It was overwritten by the current script. \n")
    return (instance, created)

-def render_with_context(req, *args, **kwargs):
-    """this is the snippet from http://www.djangosnippets.org/snippets/3/
-
-    Django uses Context, not RequestContext when you call render_to_response.
-    We always want to use RequestContext, so that django adds the context from
-    settings.TEMPLATE_CONTEXT_PROCESSORS. This way we automatically get
-    necessary settings variables passed to each template. So we use a custom
-    method, render_response instead of render_to_response. Hopefully future
-    Django releases will make this unnecessary."""
-
-    from django.shortcuts import render_to_response
-    from django.template import RequestContext
-    kwargs['context_instance'] = RequestContext(req)
-    return render_to_response(*args, **kwargs)

re_body = re.compile(r"\<body[^>]*\>(.*)\</body\>", re.DOTALL)
re_title = re.compile(r"\<title[^>]*\>(.*)\</title\>", re.DOTALL)
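The helper deleted in this hunk dates from when `render_to_response()` used a plain `Context`; passing `context_instance=RequestContext(req)` was the way to get `settings.TEMPLATE_CONTEXT_PROCESSORS` applied. The `from django.shortcuts import render` added at the top of the file is the stock replacement: `render()` takes the request itself and runs the context processors automatically. A minimal sketch of the rewritten call site (the view name, template and context dict are hypothetical):

```python
# Hypothetical call site showing the migration this diff sets up.
from django.shortcuts import render

def frontpage(request):
    context = {"expedition_year": 2019}  # made-up example data
    # Before: return render_with_context(request, 'frontpage.html', context)
    # After: render() wraps the request for us, so template context
    # processors run with no extra plumbing.
    return render(request, 'frontpage.html', context)
```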