Initial commit. Basic models mostly done.
This commit is contained in:
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
59
venv/lib/python3.8/site-packages/django/utils/_os.py
Normal file
59
venv/lib/python3.8/site-packages/django/utils/_os.py
Normal file
@@ -0,0 +1,59 @@
|
||||
import os
|
||||
import tempfile
|
||||
from os.path import abspath, dirname, join, normcase, sep
|
||||
from pathlib import Path
|
||||
|
||||
from django.core.exceptions import SuspiciousFileOperation
|
||||
|
||||
|
||||
def safe_join(base, *paths):
    """
    Join one or more path components to the base path component intelligently.
    Return a normalized, absolute version of the final path.

    Raise SuspiciousFileOperation if the final path isn't located inside of
    the base path component.
    """
    final_path = abspath(join(base, *paths))
    base_path = abspath(base)
    # Ensure final_path starts with base_path (using normcase to ensure we
    # don't false-negative on case insensitive operating systems like Windows),
    # further, one of the following conditions must be true:
    #  a) The next character is the path separator (to prevent conditions like
    #     safe_join("/dir", "/../d"))
    #  b) The final path must be the same as the base path.
    #  c) The base path must be the most root path (meaning either "/" or "C:\\")
    if (not normcase(final_path).startswith(normcase(base_path + sep)) and
            normcase(final_path) != normcase(base_path) and
            dirname(normcase(base_path)) != normcase(base_path)):
        raise SuspiciousFileOperation(
            'The joined path ({}) is located outside of the base path '
            'component ({})'.format(final_path, base_path))
    return final_path
|
||||
|
||||
|
||||
def symlinks_supported():
    """
    Return True when this platform and process can create symlinks; False
    otherwise (e.g. on Windows symlink creation requires admin permissions).
    """
    with tempfile.TemporaryDirectory() as workdir:
        target = os.path.join(workdir, 'original')
        link = os.path.join(workdir, 'symlink')
        os.makedirs(target)
        try:
            os.symlink(target, link)
        except (OSError, NotImplementedError):
            return False
        return True
|
||||
|
||||
|
||||
def to_path(value):
    """Convert value to a pathlib.Path instance, if not already a Path."""
    if isinstance(value, Path):
        return value
    if isinstance(value, str):
        return Path(value)
    raise TypeError('Invalid path type: %s' % type(value).__name__)
|
||||
226
venv/lib/python3.8/site-packages/django/utils/archive.py
Normal file
226
venv/lib/python3.8/site-packages/django/utils/archive.py
Normal file
@@ -0,0 +1,226 @@
|
||||
"""
|
||||
Based on "python-archive" -- https://pypi.org/project/python-archive/
|
||||
|
||||
Copyright (c) 2010 Gary Wilson Jr. <gary.wilson@gmail.com> and contributors.
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in
|
||||
all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
THE SOFTWARE.
|
||||
"""
|
||||
import os
|
||||
import shutil
|
||||
import stat
|
||||
import tarfile
|
||||
import zipfile
|
||||
|
||||
|
||||
class ArchiveException(Exception):
    """Base class for all errors raised by the archive helpers."""
|
||||
|
||||
|
||||
class UnrecognizedArchiveFormat(ArchiveException):
    """Raised when the given file is not a recognized archive format."""
|
||||
|
||||
|
||||
def extract(path, to_path):
    """
    Unpack the tar or zip archive at *path* into the directory *to_path*,
    closing the underlying archive when done.
    """
    with Archive(path) as archive:
        archive.extract(to_path)
|
||||
|
||||
|
||||
class Archive:
    """
    The external API class that encapsulates an archive implementation.
    """
    def __init__(self, file):
        # Pick the implementation class from the filename, then instantiate
        # it with the original argument (path string or file object).
        self._archive = self._archive_cls(file)(file)

    @staticmethod
    def _archive_cls(file):
        # Return the implementation class for *file* (a path string, or a
        # file object exposing .name), selected by extension via
        # extension_map. Raises UnrecognizedArchiveFormat when no
        # implementation matches.
        cls = None
        if isinstance(file, str):
            filename = file
        else:
            try:
                filename = file.name
            except AttributeError:
                raise UnrecognizedArchiveFormat(
                    "File object not a recognized archive format.")
        base, tail_ext = os.path.splitext(filename.lower())
        cls = extension_map.get(tail_ext)
        if not cls:
            # Retry with the next extension inward to support double
            # extensions such as '.tar.gz'.
            base, ext = os.path.splitext(base)
            cls = extension_map.get(ext)
        if not cls:
            raise UnrecognizedArchiveFormat(
                "Path not a recognized archive format: %s" % filename)
        return cls

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        # Always release the underlying archive on context exit.
        self.close()

    def extract(self, to_path):
        # Delegate to the wrapped implementation.
        self._archive.extract(to_path)

    def list(self):
        self._archive.list()

    def close(self):
        self._archive.close()
|
||||
|
||||
|
||||
class BaseArchive:
    """
    Common behavior shared by the concrete archive implementations.
    Subclasses must provide extract() and list().
    """
    @staticmethod
    def _copy_permissions(mode, filename):
        """
        Apply the archived permission bits to the unarchived file, but only
        when the archive entry was at least world-readable (this assumes a
        file won't be writable/executable without being readable).
        """
        if mode & stat.S_IROTH:
            os.chmod(filename, mode)

    def split_leading_dir(self, path):
        """
        Split off the first path component, honoring whichever separator
        ('/' or '\\') appears first. Return (head, tail) where tail is ''
        when the path contains no separator.
        """
        path = str(path).lstrip('/').lstrip('\\')
        slash = path.find('/')
        backslash = path.find('\\')
        if slash != -1 and (backslash == -1 or slash < backslash):
            return path.split('/', 1)
        if backslash != -1:
            return path.split('\\', 1)
        return path, ''

    def has_leading_dir(self, paths):
        """
        Return True if all the paths share the same non-empty leading path
        component (i.e. the archive keeps everything in one subdirectory).
        """
        common = None
        for path in paths:
            head = self.split_leading_dir(path)[0]
            if not head:
                return False
            if common is None:
                common = head
            elif head != common:
                return False
        return True

    def extract(self):
        raise NotImplementedError('subclasses of BaseArchive must provide an extract() method')

    def list(self):
        raise NotImplementedError('subclasses of BaseArchive must provide a list() method')
|
||||
|
||||
|
||||
class TarArchive(BaseArchive):
    """Archive implementation backed by the stdlib tarfile module."""

    def __init__(self, file):
        self._archive = tarfile.open(file)

    def list(self, *args, **kwargs):
        # Print the archive's table of contents (delegates to tarfile).
        self._archive.list(*args, **kwargs)

    def extract(self, to_path):
        """
        Extract all members below to_path, stripping a shared leading
        directory when the whole archive lives in one subdirectory.

        NOTE(review): member names are joined onto to_path without
        sanitization, so a crafted archive containing '..' components could
        write outside to_path — confirm callers only pass trusted archives.
        """
        members = self._archive.getmembers()
        leading = self.has_leading_dir(x.name for x in members)
        for member in members:
            name = member.name
            if leading:
                name = self.split_leading_dir(name)[1]
            filename = os.path.join(to_path, name)
            if member.isdir():
                if filename:
                    os.makedirs(filename, exist_ok=True)
            else:
                # Initialize before the try block: if extractfile() raises,
                # the finally clause below would otherwise hit a NameError on
                # the unbound 'extracted' variable, masking the real error.
                extracted = None
                try:
                    extracted = self._archive.extractfile(member)
                except (KeyError, AttributeError) as exc:
                    # Some corrupt tar files seem to produce this
                    # (specifically bad symlinks)
                    print("In the tar file %s the member %s is invalid: %s" %
                          (name, member.name, exc))
                else:
                    dirname = os.path.dirname(filename)
                    if dirname:
                        os.makedirs(dirname, exist_ok=True)
                    with open(filename, 'wb') as outfile:
                        shutil.copyfileobj(extracted, outfile)
                        self._copy_permissions(member.mode, filename)
                finally:
                    if extracted:
                        extracted.close()

    def close(self):
        self._archive.close()
|
||||
|
||||
|
||||
class ZipArchive(BaseArchive):
    """Archive implementation backed by the stdlib zipfile module."""

    def __init__(self, file):
        self._archive = zipfile.ZipFile(file)

    def list(self, *args, **kwargs):
        # Print the archive's table of contents to stdout.
        self._archive.printdir(*args, **kwargs)

    def extract(self, to_path):
        # NOTE(review): entry names are joined onto to_path without
        # sanitization, so a crafted archive containing '..' components could
        # write outside to_path — confirm callers only pass trusted archives.
        namelist = self._archive.namelist()
        # Strip a shared leading directory, if the whole archive has one.
        leading = self.has_leading_dir(namelist)
        for name in namelist:
            data = self._archive.read(name)
            info = self._archive.getinfo(name)
            if leading:
                name = self.split_leading_dir(name)[1]
            filename = os.path.join(to_path, name)
            if filename.endswith(('/', '\\')):
                # A directory
                os.makedirs(filename, exist_ok=True)
            else:
                dirname = os.path.dirname(filename)
                if dirname:
                    os.makedirs(dirname, exist_ok=True)
                with open(filename, 'wb') as outfile:
                    outfile.write(data)
                # Convert ZipInfo.external_attr to mode
                mode = info.external_attr >> 16
                self._copy_permissions(mode, filename)

    def close(self):
        self._archive.close()
|
||||
|
||||
|
||||
# Map lowercased file extensions to the Archive implementation that handles
# them. Double extensions such as '.tar.gz' are matched before the final
# single extension by Archive._archive_cls().
extension_map = dict.fromkeys((
    '.tar',
    '.tar.bz2', '.tbz2', '.tbz', '.tz2',
    '.tar.gz', '.tgz', '.taz',
    '.tar.lzma', '.tlz',
    '.tar.xz', '.txz',
), TarArchive)
extension_map['.zip'] = ZipArchive
|
||||
34
venv/lib/python3.8/site-packages/django/utils/asyncio.py
Normal file
34
venv/lib/python3.8/site-packages/django/utils/asyncio.py
Normal file
@@ -0,0 +1,34 @@
|
||||
import asyncio
|
||||
import functools
|
||||
import os
|
||||
|
||||
from django.core.exceptions import SynchronousOnlyOperation
|
||||
|
||||
|
||||
def async_unsafe(message):
    """
    Decorator to mark functions as async-unsafe. Someone trying to access
    the function while in an async context will get an error message.

    Usable bare (@async_unsafe) or with a custom message
    (@async_unsafe('...')). The check is skipped entirely when the
    DJANGO_ALLOW_ASYNC_UNSAFE environment variable is set.
    """
    def decorator(func):
        @functools.wraps(func)
        def inner(*args, **kwargs):
            if not os.environ.get('DJANGO_ALLOW_ASYNC_UNSAFE'):
                # Detect a running event loop in this thread. Use
                # get_running_loop() rather than get_event_loop(): the latter
                # implicitly creates a new event loop as a side effect (and
                # is deprecated for that use), while get_running_loop()
                # raises RuntimeError when no loop is running.
                try:
                    asyncio.get_running_loop()
                except RuntimeError:
                    pass
                else:
                    raise SynchronousOnlyOperation(message)
            # Pass onwards.
            return func(*args, **kwargs)
        return inner
    # If the message is actually a function, then be a no-arguments decorator.
    if callable(message):
        func = message
        message = 'You cannot call this from an async context - use a thread or sync_to_async.'
        return decorator(func)
    else:
        return decorator
|
||||
604
venv/lib/python3.8/site-packages/django/utils/autoreload.py
Normal file
604
venv/lib/python3.8/site-packages/django/utils/autoreload.py
Normal file
@@ -0,0 +1,604 @@
|
||||
import functools
|
||||
import itertools
|
||||
import logging
|
||||
import os
|
||||
import signal
|
||||
import subprocess
|
||||
import sys
|
||||
import threading
|
||||
import time
|
||||
import traceback
|
||||
import weakref
|
||||
from collections import defaultdict
|
||||
from pathlib import Path
|
||||
from types import ModuleType
|
||||
from zipimport import zipimporter
|
||||
|
||||
from django.apps import apps
|
||||
from django.core.signals import request_finished
|
||||
from django.dispatch import Signal
|
||||
from django.utils.functional import cached_property
|
||||
from django.utils.version import get_version_tuple
|
||||
|
||||
# Signals fired by the autoreloader: autoreload_started once a reloader
# begins watching, file_changed whenever a watched file is modified.
autoreload_started = Signal()
file_changed = Signal(providing_args=['file_path', 'kind'])

# Environment variable used to mark the child process spawned by
# restart_with_reloader().
DJANGO_AUTORELOAD_ENV = 'RUN_MAIN'

logger = logging.getLogger('django.utils.autoreload')

# If an error is raised while importing a file, it's not placed in sys.modules.
# This means that any future modifications aren't caught. Keep a list of these
# file paths to allow watching them in the future.
_error_files = []
_exception = None  # Last (type, value, traceback) captured by check_errors().

# termios is Unix-only; used by ensure_echo_on().
try:
    import termios
except ImportError:
    termios = None


# pywatchman is an optional dependency backing WatchmanReloader.
try:
    import pywatchman
except ImportError:
    pywatchman = None
|
||||
|
||||
|
||||
def check_errors(fn):
    """
    Wrap fn so that any exception it raises is recorded in the module-level
    _exception, and the offending filename is added to _error_files before
    re-raising — this lets the autoreloader keep watching files that fail
    to import.
    """
    @functools.wraps(fn)
    def wrapper(*args, **kwargs):
        global _exception
        try:
            fn(*args, **kwargs)
        except Exception:
            _exception = sys.exc_info()
            _, exc_value, tb = _exception
            filename = getattr(exc_value, 'filename', None)
            if filename is None:
                # Fall back to the file of the innermost traceback frame.
                filename = traceback.extract_tb(tb)[-1][0]
            if filename not in _error_files:
                _error_files.append(filename)
            raise

    return wrapper
|
||||
|
||||
|
||||
def raise_last_exception():
    """Re-raise the exception most recently captured by check_errors()."""
    global _exception
    last = _exception
    if last is not None:
        raise last[1]
|
||||
|
||||
|
||||
def ensure_echo_on():
    """
    Ensure that echo mode is enabled. Some tools such as PDB disable
    it which causes usability issues after reload.
    """
    # No-op on non-Unix platforms (termios unavailable) or when stdin is
    # not an interactive terminal.
    if not termios or not sys.stdin.isatty():
        return
    attr_list = termios.tcgetattr(sys.stdin)
    if not attr_list[3] & termios.ECHO:
        # attr_list[3] holds the local-mode flags; turn the ECHO bit back on.
        attr_list[3] |= termios.ECHO
        # Ignore SIGTTOU while writing terminal attributes (a background
        # process would otherwise be stopped by it), then restore the
        # previous handler.
        if hasattr(signal, 'SIGTTOU'):
            old_handler = signal.signal(signal.SIGTTOU, signal.SIG_IGN)
        else:
            old_handler = None
        termios.tcsetattr(sys.stdin, termios.TCSANOW, attr_list)
        if old_handler is not None:
            signal.signal(signal.SIGTTOU, old_handler)
|
||||
|
||||
|
||||
def iter_all_python_module_files():
    """Return a frozenset of paths backing every currently imported module."""
    # This is a hot path during reloading. Create a stable sorted list of
    # modules based on the module name and pass it to iter_modules_and_files().
    # This ensures cached results are returned in the usual case that modules
    # aren't loaded on the fly.
    keys = sorted(sys.modules)
    # Skip weakref proxies — they aren't real module objects.
    modules = tuple(m for m in map(sys.modules.__getitem__, keys) if not isinstance(m, weakref.ProxyTypes))
    return iter_modules_and_files(modules, frozenset(_error_files))
|
||||
|
||||
|
||||
@functools.lru_cache(maxsize=1)
def iter_modules_and_files(modules, extra_files):
    """Iterate through all modules needed to be watched."""
    # Both arguments must be hashable (tuple/frozenset) for the lru_cache.
    sys_file_paths = []
    for module in modules:
        # During debugging (with PyDev) the 'typing.io' and 'typing.re' objects
        # are added to sys.modules, however they are types not modules and so
        # cause issues here.
        if not isinstance(module, ModuleType):
            continue
        if module.__name__ == '__main__':
            # __main__ (usually manage.py) doesn't always have a __spec__ set.
            # Handle this by falling back to using __file__, resolved below.
            # See https://docs.python.org/reference/import.html#main-spec
            # __file__ may not exists, e.g. when running ipdb debugger.
            if hasattr(module, '__file__'):
                sys_file_paths.append(module.__file__)
            continue
        if getattr(module, '__spec__', None) is None:
            continue
        spec = module.__spec__
        # Modules could be loaded from places without a concrete location. If
        # this is the case, skip them.
        if spec.has_location:
            # Zip-imported modules report the archive path as their origin.
            origin = spec.loader.archive if isinstance(spec.loader, zipimporter) else spec.origin
            sys_file_paths.append(origin)

    results = set()
    for filename in itertools.chain(sys_file_paths, extra_files):
        if not filename:
            continue
        path = Path(filename)
        try:
            resolved_path = path.resolve(strict=True).absolute()
        except FileNotFoundError:
            # The module could have been removed, don't fail loudly if this
            # is the case.
            continue
        except ValueError as e:
            # Network filesystems may return null bytes in file paths.
            logger.debug('"%s" raised when resolving path: "%s"' % (str(e), path))
            continue
        results.add(resolved_path)
    return frozenset(results)
|
||||
|
||||
|
||||
@functools.lru_cache(maxsize=1)
def common_roots(paths):
    """
    Return a tuple of common roots that are shared between the given paths.
    File system watchers operate on directories and aren't cheap to create,
    so compute the minimum set of directories that covers every path.
    """
    # Inspired from Werkzeug:
    # https://github.com/pallets/werkzeug/blob/7477be2853df70a022d9613e765581b9411c3c39/werkzeug/_reloader.py
    # Build a prefix tree of path components, inserting the longest paths
    # first so that a shorter (ancestor) path truncates deeper branches by
    # clearing the node it terminates on.
    tree = {}
    for parts in sorted([p.parts for p in paths], key=len, reverse=True):
        node = tree
        for part in parts:
            node = node.setdefault(part, {})
        node.clear()

    # The leaves of the pruned tree are exactly the minimal roots.
    def _leaves(node, prefix):
        for part, child in node.items():
            yield from _leaves(child, prefix + (part,))
        if not node:
            yield Path(*prefix)

    return tuple(_leaves(tree, ()))
|
||||
|
||||
|
||||
def sys_path_directories():
    """
    Yield absolute, resolved directories from sys.path, skipping entries
    that don't exist. File entries (e.g. zip archives) yield their
    containing directory instead.
    """
    for entry in sys.path:
        try:
            resolved = Path(entry).resolve(strict=True).absolute()
        except FileNotFoundError:
            continue
        yield resolved.parent if resolved.is_file() else resolved
|
||||
|
||||
|
||||
def get_child_arguments():
    """
    Return the executable. This contains a workaround for Windows if the
    executable is reported to not have the .exe extension which can cause bugs
    on reloading.
    """
    import django.__main__

    # Re-apply any -W warning options the parent interpreter was given.
    args = [sys.executable] + ['-W%s' % o for o in sys.warnoptions]
    if sys.argv[0] == django.__main__.__file__:
        # The server was started with `python -m django runserver`.
        args += ['-m', 'django']
        args += sys.argv[1:]
    else:
        args += sys.argv
    return args
|
||||
|
||||
|
||||
def trigger_reload(filename):
    # Exit code 3 signals restart_with_reloader() to respawn the child.
    logger.info('%s changed, reloading.', filename)
    sys.exit(3)
|
||||
|
||||
|
||||
def restart_with_reloader():
    """
    Repeatedly run this program in a child process (marked via
    DJANGO_AUTORELOAD_ENV) until it exits with a code other than 3 — the
    code trigger_reload() uses to request a restart. Return the child's
    final exit code.
    """
    new_environ = {**os.environ, DJANGO_AUTORELOAD_ENV: 'true'}
    args = get_child_arguments()
    while True:
        p = subprocess.run(args, env=new_environ, close_fds=False)
        if p.returncode != 3:
            return p.returncode
|
||||
|
||||
|
||||
class BaseReloader:
    """
    Abstract file-watching loop shared by the concrete reloaders.
    Subclasses implement tick() and check_availability().
    """
    def __init__(self):
        # Extra individual files to watch, beyond imported modules.
        self.extra_files = set()
        # Maps a directory Path to the set of glob patterns watched in it.
        self.directory_globs = defaultdict(set)
        self._stop_condition = threading.Event()

    def watch_dir(self, path, glob):
        """Register *glob* (e.g. '**/*.html') to be watched under *path*."""
        path = Path(path)
        try:
            path = path.absolute()
        except FileNotFoundError:
            logger.debug(
                'Unable to watch directory %s as it cannot be resolved.',
                path,
                exc_info=True,
            )
            return
        logger.debug('Watching dir %s with glob %s.', path, glob)
        self.directory_globs[path].add(glob)

    def watched_files(self, include_globs=True):
        """
        Yield all files that need to be watched, including module files and
        files within globs.
        """
        yield from iter_all_python_module_files()
        yield from self.extra_files
        if include_globs:
            for directory, patterns in self.directory_globs.items():
                for pattern in patterns:
                    yield from directory.glob(pattern)

    def wait_for_apps_ready(self, app_reg, django_main_thread):
        """
        Wait until Django reports that the apps have been loaded. If the given
        thread has terminated before the apps are ready, then a SyntaxError or
        other non-recoverable error has been raised. In that case, stop waiting
        for the apps_ready event and continue processing.

        Return True if the thread is alive and the ready event has been
        triggered, or False if the thread is terminated while waiting for the
        event.
        """
        # Poll in short intervals so thread death is noticed promptly; the
        # while-else runs only when the thread dies without the event firing.
        while django_main_thread.is_alive():
            if app_reg.ready_event.wait(timeout=0.1):
                return True
        else:
            logger.debug('Main Django thread has terminated before apps are ready.')
            return False

    def run(self, django_main_thread):
        """Block until apps are ready, then enter the watch loop."""
        logger.debug('Waiting for apps ready_event.')
        self.wait_for_apps_ready(apps, django_main_thread)
        from django.urls import get_resolver
        # Prevent a race condition where URL modules aren't loaded when the
        # reloader starts by accessing the urlconf_module property.
        try:
            get_resolver().urlconf_module
        except Exception:
            # Loading the urlconf can result in errors during development.
            # If this occurs then swallow the error and continue.
            pass
        logger.debug('Apps ready_event triggered. Sending autoreload_started signal.')
        autoreload_started.send(sender=self)
        self.run_loop()

    def run_loop(self):
        # Drive the tick() generator until stop() is requested or the
        # generator is exhausted.
        ticker = self.tick()
        while not self.should_stop:
            try:
                next(ticker)
            except StopIteration:
                break
        self.stop()

    def tick(self):
        """
        This generator is called in a loop from run_loop. It's important that
        the method takes care of pausing or otherwise waiting for a period of
        time. This split between run_loop() and tick() is to improve the
        testability of the reloader implementations by decoupling the work they
        do from the loop.
        """
        raise NotImplementedError('subclasses must implement tick().')

    @classmethod
    def check_availability(cls):
        raise NotImplementedError('subclasses must implement check_availability().')

    def notify_file_changed(self, path):
        # Give file_changed receivers a chance to veto a reload; if none
        # returns a truthy value, restart the process.
        results = file_changed.send(sender=self, file_path=path)
        logger.debug('%s notified as changed. Signal results: %s.', path, results)
        if not any(res[1] for res in results):
            trigger_reload(path)

    # These are primarily used for testing.
    @property
    def should_stop(self):
        return self._stop_condition.is_set()

    def stop(self):
        self._stop_condition.set()
|
||||
|
||||
|
||||
class StatReloader(BaseReloader):
    """Polling reloader: compares file mtimes once per SLEEP_TIME seconds."""

    SLEEP_TIME = 1  # Check for changes once per second.

    def tick(self):
        # Maps each watched path to its last seen mtime.
        mtimes = {}
        while True:
            for filepath, mtime in self.snapshot_files():
                old_time = mtimes.get(filepath)
                mtimes[filepath] = mtime
                if old_time is None:
                    # First sighting — record, but don't treat as a change.
                    logger.debug('File %s first seen with mtime %s', filepath, mtime)
                    continue
                elif mtime > old_time:
                    logger.debug('File %s previous mtime: %s, current mtime: %s', filepath, old_time, mtime)
                    self.notify_file_changed(filepath)

            time.sleep(self.SLEEP_TIME)
            yield

    def snapshot_files(self):
        """Yield (path, mtime) pairs for every currently watched file."""
        # watched_files may produce duplicate paths if globs overlap.
        seen_files = set()
        for file in self.watched_files():
            if file in seen_files:
                continue
            try:
                mtime = file.stat().st_mtime
            except OSError:
                # This is thrown when the file does not exist.
                continue
            seen_files.add(file)
            yield file, mtime

    @classmethod
    def check_availability(cls):
        # Stat polling works everywhere; this is the fallback reloader.
        return True
|
||||
|
||||
|
||||
class WatchmanUnavailable(RuntimeError):
    """Raised when the Watchman service is missing, too old, or unreachable."""
|
||||
|
||||
|
||||
class WatchmanReloader(BaseReloader):
|
||||
def __init__(self):
|
||||
self.roots = defaultdict(set)
|
||||
self.processed_request = threading.Event()
|
||||
self.client_timeout = int(os.environ.get('DJANGO_WATCHMAN_TIMEOUT', 5))
|
||||
super().__init__()
|
||||
|
||||
@cached_property
|
||||
def client(self):
|
||||
return pywatchman.client(timeout=self.client_timeout)
|
||||
|
||||
def _watch_root(self, root):
|
||||
# In practice this shouldn't occur, however, it's possible that a
|
||||
# directory that doesn't exist yet is being watched. If it's outside of
|
||||
# sys.path then this will end up a new root. How to handle this isn't
|
||||
# clear: Not adding the root will likely break when subscribing to the
|
||||
# changes, however, as this is currently an internal API, no files
|
||||
# will be being watched outside of sys.path. Fixing this by checking
|
||||
# inside watch_glob() and watch_dir() is expensive, instead this could
|
||||
# could fall back to the StatReloader if this case is detected? For
|
||||
# now, watching its parent, if possible, is sufficient.
|
||||
if not root.exists():
|
||||
if not root.parent.exists():
|
||||
logger.warning('Unable to watch root dir %s as neither it or its parent exist.', root)
|
||||
return
|
||||
root = root.parent
|
||||
result = self.client.query('watch-project', str(root.absolute()))
|
||||
if 'warning' in result:
|
||||
logger.warning('Watchman warning: %s', result['warning'])
|
||||
logger.debug('Watchman watch-project result: %s', result)
|
||||
return result['watch'], result.get('relative_path')
|
||||
|
||||
@functools.lru_cache()
|
||||
def _get_clock(self, root):
|
||||
return self.client.query('clock', root)['clock']
|
||||
|
||||
def _subscribe(self, directory, name, expression):
|
||||
root, rel_path = self._watch_root(directory)
|
||||
query = {
|
||||
'expression': expression,
|
||||
'fields': ['name'],
|
||||
'since': self._get_clock(root),
|
||||
'dedup_results': True,
|
||||
}
|
||||
if rel_path:
|
||||
query['relative_root'] = rel_path
|
||||
logger.debug('Issuing watchman subscription %s, for root %s. Query: %s', name, root, query)
|
||||
self.client.query('subscribe', root, name, query)
|
||||
|
||||
def _subscribe_dir(self, directory, filenames):
|
||||
if not directory.exists():
|
||||
if not directory.parent.exists():
|
||||
logger.warning('Unable to watch directory %s as neither it or its parent exist.', directory)
|
||||
return
|
||||
prefix = 'files-parent-%s' % directory.name
|
||||
filenames = ['%s/%s' % (directory.name, filename) for filename in filenames]
|
||||
directory = directory.parent
|
||||
expression = ['name', filenames, 'wholename']
|
||||
else:
|
||||
prefix = 'files'
|
||||
expression = ['name', filenames]
|
||||
self._subscribe(directory, '%s:%s' % (prefix, directory), expression)
|
||||
|
||||
def _watch_glob(self, directory, patterns):
|
||||
"""
|
||||
Watch a directory with a specific glob. If the directory doesn't yet
|
||||
exist, attempt to watch the parent directory and amend the patterns to
|
||||
include this. It's important this method isn't called more than one per
|
||||
directory when updating all subscriptions. Subsequent calls will
|
||||
overwrite the named subscription, so it must include all possible glob
|
||||
expressions.
|
||||
"""
|
||||
prefix = 'glob'
|
||||
if not directory.exists():
|
||||
if not directory.parent.exists():
|
||||
logger.warning('Unable to watch directory %s as neither it or its parent exist.', directory)
|
||||
return
|
||||
prefix = 'glob-parent-%s' % directory.name
|
||||
patterns = ['%s/%s' % (directory.name, pattern) for pattern in patterns]
|
||||
directory = directory.parent
|
||||
|
||||
expression = ['anyof']
|
||||
for pattern in patterns:
|
||||
expression.append(['match', pattern, 'wholename'])
|
||||
self._subscribe(directory, '%s:%s' % (prefix, directory), expression)
|
||||
|
||||
def watched_roots(self, watched_files):
|
||||
extra_directories = self.directory_globs.keys()
|
||||
watched_file_dirs = [f.parent for f in watched_files]
|
||||
sys_paths = list(sys_path_directories())
|
||||
return frozenset((*extra_directories, *watched_file_dirs, *sys_paths))
|
||||
|
||||
def _update_watches(self):
|
||||
watched_files = list(self.watched_files(include_globs=False))
|
||||
found_roots = common_roots(self.watched_roots(watched_files))
|
||||
logger.debug('Watching %s files', len(watched_files))
|
||||
logger.debug('Found common roots: %s', found_roots)
|
||||
# Setup initial roots for performance, shortest roots first.
|
||||
for root in sorted(found_roots):
|
||||
self._watch_root(root)
|
||||
for directory, patterns in self.directory_globs.items():
|
||||
self._watch_glob(directory, patterns)
|
||||
# Group sorted watched_files by their parent directory.
|
||||
sorted_files = sorted(watched_files, key=lambda p: p.parent)
|
||||
for directory, group in itertools.groupby(sorted_files, key=lambda p: p.parent):
|
||||
# These paths need to be relative to the parent directory.
|
||||
self._subscribe_dir(directory, [str(p.relative_to(directory)) for p in group])
|
||||
|
||||
def update_watches(self):
|
||||
try:
|
||||
self._update_watches()
|
||||
except Exception as ex:
|
||||
# If the service is still available, raise the original exception.
|
||||
if self.check_server_status(ex):
|
||||
raise
|
||||
|
||||
def _check_subscription(self, sub):
|
||||
subscription = self.client.getSubscription(sub)
|
||||
if not subscription:
|
||||
return
|
||||
logger.debug('Watchman subscription %s has results.', sub)
|
||||
for result in subscription:
|
||||
# When using watch-project, it's not simple to get the relative
|
||||
# directory without storing some specific state. Store the full
|
||||
# path to the directory in the subscription name, prefixed by its
|
||||
# type (glob, files).
|
||||
root_directory = Path(result['subscription'].split(':', 1)[1])
|
||||
logger.debug('Found root directory %s', root_directory)
|
||||
for file in result.get('files', []):
|
||||
self.notify_file_changed(root_directory / file)
|
||||
|
||||
def request_processed(self, **kwargs):
|
||||
logger.debug('Request processed. Setting update_watches event.')
|
||||
self.processed_request.set()
|
||||
|
||||
def tick(self):
    """
    Generator driving the reload loop: poll watchman and yield once per
    iteration so the caller can interleave its own checks.
    """
    request_finished.connect(self.request_processed)
    self.update_watches()
    while True:
        if self.processed_request.is_set():
            # A request was served since the last tick; the watched file
            # set (e.g. the URLconf's modules) may have changed.
            self.update_watches()
            self.processed_request.clear()
        try:
            self.client.receive()
        except pywatchman.SocketTimeout:
            # No events within the client timeout -- not an error.
            pass
        except pywatchman.WatchmanError as ex:
            logger.debug('Watchman error: %s, checking server status.', ex)
            self.check_server_status(ex)
        else:
            # Data arrived: drain every subscription's pending results.
            for sub in list(self.client.subs.keys()):
                self._check_subscription(sub)
        yield
|
||||
|
||||
def stop(self):
    # Close the watchman socket before the base class tears down its state.
    self.client.close()
    super().stop()
|
||||
|
||||
def check_server_status(self, inner_ex=None):
    """Return True if the server is available.

    Raise WatchmanUnavailable (chaining *inner_ex* as the cause) when the
    watchman service does not respond.
    """
    try:
        # Any successful query proves the watchman service is alive.
        self.client.query('version')
    except Exception:
        raise WatchmanUnavailable(str(inner_ex)) from inner_ex
    return True
|
||||
|
||||
@classmethod
def check_availability(cls):
    """Raise WatchmanUnavailable unless a usable watchman (>= 4.9) is
    reachable. Used by get_reloader() to choose an implementation."""
    if not pywatchman:
        raise WatchmanUnavailable('pywatchman not installed.')
    # Short timeout: this is only a liveness probe at startup.
    client = pywatchman.client(timeout=0.1)
    try:
        result = client.capabilityCheck()
    except Exception:
        # The service is down?
        raise WatchmanUnavailable('Cannot connect to the watchman service.')
    version = get_version_tuple(result['version'])
    # Watchman 4.9 includes multiple improvements to watching project
    # directories as well as case insensitive filesystems.
    logger.debug('Watchman version %s', version)
    if version < (4, 9):
        raise WatchmanUnavailable('Watchman 4.9 or later is required.')
|
||||
|
||||
|
||||
def get_reloader():
    """Return the most suitable reloader for this environment."""
    # Prefer the event-driven Watchman reloader; fall back to stat()-polling
    # when the watchman service isn't usable.
    try:
        WatchmanReloader.check_availability()
    except WatchmanUnavailable:
        return StatReloader()
    return WatchmanReloader()
|
||||
|
||||
|
||||
def start_django(reloader, main_func, *args, **kwargs):
    """
    Run main_func in a daemonized 'django-main-thread' and drive *reloader*
    on the current (main) thread until it reports it should stop.

    If Watchman becomes unavailable mid-run, swap in a StatReloader and
    keep going.
    """
    ensure_echo_on()

    main_func = check_errors(main_func)
    django_main_thread = threading.Thread(target=main_func, args=args, kwargs=kwargs, name='django-main-thread')
    # Set the attribute directly instead of calling the deprecated
    # Thread.setDaemon(); daemonizing lets the process exit even while the
    # worker thread is still running.
    django_main_thread.daemon = True
    django_main_thread.start()

    while not reloader.should_stop:
        try:
            reloader.run(django_main_thread)
        except WatchmanUnavailable as ex:
            # It's possible that the watchman service shuts down or otherwise
            # becomes unavailable. In that case, use the StatReloader.
            reloader = StatReloader()
            logger.error('Error connecting to Watchman: %s', ex)
            logger.info('Watching for file changes with %s', reloader.__class__.__name__)
|
||||
|
||||
|
||||
def run_with_reloader(main_func, *args, **kwargs):
    """
    Entry point for autoreloading. The DJANGO_AUTORELOAD_ENV variable
    distinguishes the supervised child process (which runs main_func under a
    reloader) from the initial process, which delegates to
    restart_with_reloader().
    """
    # Exit cleanly on SIGTERM so finally/atexit handlers run.
    signal.signal(signal.SIGTERM, lambda *args: sys.exit(0))
    try:
        if os.environ.get(DJANGO_AUTORELOAD_ENV) == 'true':
            reloader = get_reloader()
            logger.info('Watching for file changes with %s', reloader.__class__.__name__)
            start_django(reloader, main_func, *args, **kwargs)
        else:
            exit_code = restart_with_reloader()
            sys.exit(exit_code)
    except KeyboardInterrupt:
        pass
|
||||
101
venv/lib/python3.8/site-packages/django/utils/baseconv.py
Normal file
101
venv/lib/python3.8/site-packages/django/utils/baseconv.py
Normal file
@@ -0,0 +1,101 @@
|
||||
# Copyright (c) 2010 Guilherme Gondim. All rights reserved.
|
||||
# Copyright (c) 2009 Simon Willison. All rights reserved.
|
||||
# Copyright (c) 2002 Drew Perttula. All rights reserved.
|
||||
#
|
||||
# License:
|
||||
# Python Software Foundation License version 2
|
||||
#
|
||||
# See the file "LICENSE" for terms & conditions for usage, and a DISCLAIMER OF
|
||||
# ALL WARRANTIES.
|
||||
#
|
||||
# This Baseconv distribution contains no GNU General Public Licensed (GPLed)
|
||||
# code so it may be used in proprietary projects just like prior ``baseconv``
|
||||
# distributions.
|
||||
#
|
||||
# All trademarks referenced herein are property of their respective holders.
|
||||
#
|
||||
|
||||
"""
|
||||
Convert numbers from base 10 integers to base X strings and back again.
|
||||
|
||||
Sample usage::
|
||||
|
||||
>>> base20 = BaseConverter('0123456789abcdefghij')
|
||||
>>> base20.encode(1234)
|
||||
'31e'
|
||||
>>> base20.decode('31e')
|
||||
1234
|
||||
>>> base20.encode(-1234)
|
||||
'-31e'
|
||||
>>> base20.decode('-31e')
|
||||
-1234
|
||||
>>> base11 = BaseConverter('0123456789-', sign='$')
|
||||
>>> base11.encode(-1234)
'$-22'
>>> base11.decode('$-22')
-1234
|
||||
|
||||
"""
|
||||
|
||||
# Digit alphabets for the common bases. BASE56 drops the visually ambiguous
# characters (0/O/1/I/l/o) so hand-typed codes are less error-prone.
BASE2_ALPHABET = '01'
BASE16_ALPHABET = '0123456789ABCDEF'
BASE56_ALPHABET = '23456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnpqrstuvwxyz'
BASE36_ALPHABET = '0123456789abcdefghijklmnopqrstuvwxyz'
BASE62_ALPHABET = '0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz'
# URL-safe base64-style alphabet: '-' and '_' instead of '+' and '/'.
BASE64_ALPHABET = BASE62_ALPHABET + '-_'
|
||||
|
||||
|
||||
class BaseConverter:
    """Convert between base-10 integers and strings in an arbitrary base
    defined by a digit alphabet."""
    decimal_digits = '0123456789'

    def __init__(self, digits, sign='-'):
        self.sign = sign
        self.digits = digits
        if sign in self.digits:
            raise ValueError('Sign character found in converter base digits.')

    def __repr__(self):
        return "<%s: base%s (%s)>" % (self.__class__.__name__, len(self.digits), self.digits)

    def encode(self, i):
        """Encode the base-10 value *i* as a string in this base."""
        negative, encoded = self.convert(i, self.decimal_digits, self.digits, '-')
        return self.sign + encoded if negative else encoded

    def decode(self, s):
        """Decode string *s* (written in this base) back to an int."""
        negative, decoded = self.convert(s, self.digits, self.decimal_digits, self.sign)
        return int('-' + decoded if negative else decoded)

    def convert(self, number, from_digits, to_digits, sign):
        """Re-express *number* from one digit alphabet into another.

        Return a (negative, digits) pair: *negative* is 1 when the input
        carried the given *sign* prefix, and *digits* is the magnitude
        rendered with *to_digits*.
        """
        text = str(number)
        if text[0] == sign:
            text = text[1:]
            negative = 1
        else:
            negative = 0

        # Accumulate the magnitude as a plain integer.
        magnitude = 0
        base_in = len(from_digits)
        for ch in text:
            magnitude = magnitude * base_in + from_digits.index(ch)

        # Re-render the magnitude using the target alphabet.
        base_out = len(to_digits)
        if magnitude == 0:
            encoded = to_digits[0]
        else:
            encoded = ''
            while magnitude > 0:
                magnitude, remainder = divmod(magnitude, base_out)
                encoded = to_digits[remainder] + encoded
        return negative, encoded
|
||||
|
||||
|
||||
# Ready-made converters for the common bases. base64 uses '$' as its sign
# character because '-' is part of its digit alphabet.
base2 = BaseConverter(BASE2_ALPHABET)
base16 = BaseConverter(BASE16_ALPHABET)
base36 = BaseConverter(BASE36_ALPHABET)
base56 = BaseConverter(BASE56_ALPHABET)
base62 = BaseConverter(BASE62_ALPHABET)
base64 = BaseConverter(BASE64_ALPHABET, sign='$')
|
||||
392
venv/lib/python3.8/site-packages/django/utils/cache.py
Normal file
392
venv/lib/python3.8/site-packages/django/utils/cache.py
Normal file
@@ -0,0 +1,392 @@
|
||||
"""
|
||||
This module contains helper functions for controlling caching. It does so by
|
||||
managing the "Vary" header of responses. It includes functions to patch the
|
||||
header of response objects directly and decorators that change functions to do
|
||||
that header-patching themselves.
|
||||
|
||||
For information on the Vary header, see:
|
||||
|
||||
https://tools.ietf.org/html/rfc7231#section-7.1.4
|
||||
|
||||
Essentially, the "Vary" HTTP header defines which headers a cache should take
|
||||
into account when building its cache key. Requests with the same path but
|
||||
different header content for headers named in "Vary" need to get different
|
||||
cache keys to prevent delivery of wrong content.
|
||||
|
||||
An example: i18n middleware would need to distinguish caches by the
|
||||
"Accept-language" header.
|
||||
"""
|
||||
import hashlib
|
||||
import re
|
||||
import time
|
||||
|
||||
from django.conf import settings
|
||||
from django.core.cache import caches
|
||||
from django.http import HttpResponse, HttpResponseNotModified
|
||||
from django.utils.encoding import iri_to_uri
|
||||
from django.utils.http import (
|
||||
http_date, parse_etags, parse_http_date_safe, quote_etag,
|
||||
)
|
||||
from django.utils.log import log_response
|
||||
from django.utils.timezone import get_current_timezone_name
|
||||
from django.utils.translation import get_language
|
||||
|
||||
cc_delim_re = re.compile(r'\s*,\s*')
|
||||
|
||||
|
||||
def patch_cache_control(response, **kwargs):
    """
    Patch the Cache-Control header by adding all keyword arguments to it.
    The transformation is as follows:

    * All keyword parameter names are turned to lowercase, and underscores
      are converted to hyphens.
    * If the value of a parameter is True (exactly True, not just a
      true value), only the parameter name is added to the header.
    * All other parameters are added with their value, after applying
      str() to it.
    """
    def dictitem(s):
        # Parse one 'directive' or 'directive=value' token into a
        # (lowercased name, value) pair; bare directives map to True.
        t = s.split('=', 1)
        if len(t) > 1:
            return (t[0].lower(), t[1])
        else:
            return (t[0].lower(), True)

    def dictvalue(t):
        # Inverse of dictitem(): serialize a (name, value) pair back into
        # a Cache-Control token.
        if t[1] is True:
            return t[0]
        else:
            return '%s=%s' % (t[0], t[1])

    if response.get('Cache-Control'):
        cc = cc_delim_re.split(response['Cache-Control'])
        cc = dict(dictitem(el) for el in cc)
    else:
        cc = {}

    # If there's already a max-age header but we're being asked to set a new
    # max-age, use the minimum of the two ages. In practice this happens when
    # a decorator and a piece of middleware both operate on a given view.
    if 'max-age' in cc and 'max_age' in kwargs:
        kwargs['max_age'] = min(int(cc['max-age']), kwargs['max_age'])

    # Allow overriding private caching and vice versa
    if 'private' in cc and 'public' in kwargs:
        del cc['private']
    elif 'public' in cc and 'private' in kwargs:
        del cc['public']

    for (k, v) in kwargs.items():
        cc[k.replace('_', '-')] = v
    cc = ', '.join(dictvalue(el) for el in cc.items())
    response['Cache-Control'] = cc
|
||||
|
||||
|
||||
def get_max_age(response):
    """
    Return the max-age from the response Cache-Control header as an integer,
    or None if it wasn't found or wasn't an integer.
    """
    if not response.has_header('Cache-Control'):
        return
    directives = dict(
        _to_tuple(token)
        for token in cc_delim_re.split(response['Cache-Control'])
    )
    try:
        return int(directives['max-age'])
    except (ValueError, TypeError, KeyError):
        # Missing directive, a bare 'max-age' (parsed as True), or a
        # non-numeric value: fall through and return None implicitly.
        pass
|
||||
|
||||
|
||||
def set_response_etag(response):
    """Set the ETag header to the quoted MD5 of the body (non-streaming
    responses only) and return the response."""
    if response.streaming:
        # Streaming content can't be hashed up front.
        return response
    response['ETag'] = quote_etag(hashlib.md5(response.content).hexdigest())
    return response
|
||||
|
||||
|
||||
def _precondition_failed(request):
    """Build and log a 412 Precondition Failed response."""
    response = HttpResponse(status=412)
    log_response(
        'Precondition Failed: %s', request.path,
        response=response,
        request=request,
    )
    return response
|
||||
|
||||
|
||||
def _not_modified(request, response=None):
    """Build a 304 Not Modified response, carrying over the cache-related
    metadata headers and cookies from *response* when one is given."""
    new_response = HttpResponseNotModified()
    if response:
        # Preserve the headers required by Section 4.1 of RFC 7232, as well as
        # Last-Modified.
        for header in ('Cache-Control', 'Content-Location', 'Date', 'ETag', 'Expires', 'Last-Modified', 'Vary'):
            if header in response:
                new_response[header] = response[header]

        # Preserve cookies as per the cookie specification: "If a proxy server
        # receives a response which contains a Set-cookie header, it should
        # propagate the Set-cookie header to the client, regardless of whether
        # the response was 304 (Not Modified) or 200 (OK).
        # https://curl.haxx.se/rfc/cookie_spec.html
        new_response.cookies = response.cookies
    return new_response
|
||||
|
||||
|
||||
def get_conditional_response(request, etag=None, last_modified=None, response=None):
    """
    Evaluate the request's conditional headers (If-Match,
    If-Unmodified-Since, If-None-Match, If-Modified-Since) against the given
    *etag* / *last_modified* validators, in the precedence order of RFC 7232
    section 6. Return a 412 or 304 response when a precondition dictates one,
    otherwise return *response* unchanged (which may be None).
    """
    # Only return conditional responses on successful requests.
    if response and not (200 <= response.status_code < 300):
        return response

    # Get HTTP request headers.
    if_match_etags = parse_etags(request.META.get('HTTP_IF_MATCH', ''))
    if_unmodified_since = request.META.get('HTTP_IF_UNMODIFIED_SINCE')
    if_unmodified_since = if_unmodified_since and parse_http_date_safe(if_unmodified_since)
    if_none_match_etags = parse_etags(request.META.get('HTTP_IF_NONE_MATCH', ''))
    if_modified_since = request.META.get('HTTP_IF_MODIFIED_SINCE')
    if_modified_since = if_modified_since and parse_http_date_safe(if_modified_since)

    # Step 1 of section 6 of RFC 7232: Test the If-Match precondition.
    if if_match_etags and not _if_match_passes(etag, if_match_etags):
        return _precondition_failed(request)

    # Step 2: Test the If-Unmodified-Since precondition.
    if (not if_match_etags and if_unmodified_since and
            not _if_unmodified_since_passes(last_modified, if_unmodified_since)):
        return _precondition_failed(request)

    # Step 3: Test the If-None-Match precondition.
    if if_none_match_etags and not _if_none_match_passes(etag, if_none_match_etags):
        if request.method in ('GET', 'HEAD'):
            return _not_modified(request, response)
        else:
            return _precondition_failed(request)

    # Step 4: Test the If-Modified-Since precondition.
    if (not if_none_match_etags and if_modified_since and
            not _if_modified_since_passes(last_modified, if_modified_since)):
        if request.method in ('GET', 'HEAD'):
            return _not_modified(request, response)

    # Step 5: Test the If-Range precondition (not supported).
    # Step 6: Return original response since there isn't a conditional response.
    return response
|
||||
|
||||
|
||||
def _if_match_passes(target_etag, etags):
|
||||
"""
|
||||
Test the If-Match comparison as defined in section 3.1 of RFC 7232.
|
||||
"""
|
||||
if not target_etag:
|
||||
# If there isn't an ETag, then there can't be a match.
|
||||
return False
|
||||
elif etags == ['*']:
|
||||
# The existence of an ETag means that there is "a current
|
||||
# representation for the target resource", even if the ETag is weak,
|
||||
# so there is a match to '*'.
|
||||
return True
|
||||
elif target_etag.startswith('W/'):
|
||||
# A weak ETag can never strongly match another ETag.
|
||||
return False
|
||||
else:
|
||||
# Since the ETag is strong, this will only return True if there's a
|
||||
# strong match.
|
||||
return target_etag in etags
|
||||
|
||||
|
||||
def _if_unmodified_since_passes(last_modified, if_unmodified_since):
|
||||
"""
|
||||
Test the If-Unmodified-Since comparison as defined in section 3.4 of
|
||||
RFC 7232.
|
||||
"""
|
||||
return last_modified and last_modified <= if_unmodified_since
|
||||
|
||||
|
||||
def _if_none_match_passes(target_etag, etags):
|
||||
"""
|
||||
Test the If-None-Match comparison as defined in section 3.2 of RFC 7232.
|
||||
"""
|
||||
if not target_etag:
|
||||
# If there isn't an ETag, then there isn't a match.
|
||||
return True
|
||||
elif etags == ['*']:
|
||||
# The existence of an ETag means that there is "a current
|
||||
# representation for the target resource", so there is a match to '*'.
|
||||
return False
|
||||
else:
|
||||
# The comparison should be weak, so look for a match after stripping
|
||||
# off any weak indicators.
|
||||
target_etag = target_etag.strip('W/')
|
||||
etags = (etag.strip('W/') for etag in etags)
|
||||
return target_etag not in etags
|
||||
|
||||
|
||||
def _if_modified_since_passes(last_modified, if_modified_since):
|
||||
"""
|
||||
Test the If-Modified-Since comparison as defined in section 3.3 of RFC 7232.
|
||||
"""
|
||||
return not last_modified or last_modified > if_modified_since
|
||||
|
||||
|
||||
def patch_response_headers(response, cache_timeout=None):
    """
    Add HTTP caching headers to the given HttpResponse: Expires and
    Cache-Control.

    Each header is only added if it isn't already set.

    cache_timeout is in seconds. The CACHE_MIDDLEWARE_SECONDS setting is used
    by default.
    """
    if cache_timeout is None:
        cache_timeout = settings.CACHE_MIDDLEWARE_SECONDS
    # Clamp to zero: a negative max-age is not valid HTTP.
    cache_timeout = max(cache_timeout, 0)
    if not response.has_header('Expires'):
        response['Expires'] = http_date(time.time() + cache_timeout)
    patch_cache_control(response, max_age=cache_timeout)
|
||||
|
||||
|
||||
def add_never_cache_headers(response):
    """
    Add headers to a response to indicate that a page should never be cached.
    """
    # cache_timeout=-1 is clamped to 0, producing an already-expired
    # Expires header and max-age=0...
    patch_response_headers(response, cache_timeout=-1)
    # ...then forbid storage and reuse by both shared and private caches.
    patch_cache_control(response, no_cache=True, no_store=True, must_revalidate=True, private=True)
|
||||
|
||||
|
||||
def patch_vary_headers(response, newheaders):
    """
    Add (or update) the "Vary" header in the given HttpResponse object.
    newheaders is a list of header names that should be in "Vary". If headers
    contains an asterisk, then "Vary" header will consist of a single asterisk
    '*'. Otherwise, existing headers in "Vary" aren't removed.
    """
    # Note that we need to keep the original order intact, because cache
    # implementations may rely on the order of the Vary contents in, say,
    # computing an MD5 hash.
    if response.has_header('Vary'):
        vary_headers = cc_delim_re.split(response['Vary'])
    else:
        vary_headers = []
    # Compare case-insensitively, but keep the caller's original casing.
    seen = {header.lower() for header in vary_headers}
    vary_headers.extend(
        header for header in newheaders if header.lower() not in seen
    )
    response['Vary'] = '*' if '*' in vary_headers else ', '.join(vary_headers)
|
||||
|
||||
|
||||
def has_vary_header(response, header_query):
    """
    Check to see if the response has a given header name in its Vary header.
    """
    if not response.has_header('Vary'):
        return False
    # Vary header names are compared case-insensitively.
    vary = {header.lower() for header in cc_delim_re.split(response['Vary'])}
    return header_query.lower() in vary
|
||||
|
||||
|
||||
def _i18n_cache_key_suffix(request, cache_key):
    """If necessary, add the current locale or time zone to the cache key."""
    if settings.USE_I18N or settings.USE_L10N:
        # first check if LocaleMiddleware or another middleware added
        # LANGUAGE_CODE to request, then fall back to the active language
        # which in turn can also fall back to settings.LANGUAGE_CODE
        language = getattr(request, 'LANGUAGE_CODE', get_language())
        cache_key += '.%s' % language
    if settings.USE_TZ:
        cache_key += '.%s' % get_current_timezone_name()
    return cache_key
|
||||
|
||||
|
||||
def _generate_cache_key(request, method, headerlist, key_prefix):
    """Return a cache key from the headers given in the header list."""
    # Hash the values of the listed request headers...
    hashed_headers = hashlib.md5()
    for header in headerlist:
        value = request.META.get(header)
        if value is not None:
            hashed_headers.update(value.encode())
    # ...and the full request URI, then combine both digests into the key.
    url = hashlib.md5(iri_to_uri(request.build_absolute_uri()).encode('ascii'))
    cache_key = 'views.decorators.cache.cache_page.%s.%s.%s.%s' % (
        key_prefix, method, url.hexdigest(), hashed_headers.hexdigest())
    return _i18n_cache_key_suffix(request, cache_key)
|
||||
|
||||
|
||||
def _generate_cache_header_key(key_prefix, request):
    """Return a cache key for the header cache."""
    uri_hash = hashlib.md5(
        iri_to_uri(request.build_absolute_uri()).encode('ascii')
    ).hexdigest()
    cache_key = 'views.decorators.cache.cache_header.%s.%s' % (key_prefix, uri_hash)
    return _i18n_cache_key_suffix(request, cache_key)
|
||||
|
||||
|
||||
def get_cache_key(request, key_prefix=None, method='GET', cache=None):
    """
    Return a cache key based on the request URL and query. It can be used
    in the request phase because it pulls the list of headers to take into
    account from the global URL registry and uses those to build a cache key
    to check against.

    If there isn't a headerlist stored, return None, indicating that the page
    needs to be rebuilt.
    """
    if key_prefix is None:
        key_prefix = settings.CACHE_MIDDLEWARE_KEY_PREFIX
    if cache is None:
        cache = caches[settings.CACHE_MIDDLEWARE_ALIAS]
    cache_key = _generate_cache_header_key(key_prefix, request)
    headerlist = cache.get(cache_key)
    if headerlist is None:
        # Nothing learned for this URL yet: the page must be rebuilt.
        return None
    return _generate_cache_key(request, method, headerlist, key_prefix)
|
||||
|
||||
|
||||
def learn_cache_key(request, response, cache_timeout=None, key_prefix=None, cache=None):
    """
    Learn what headers to take into account for some request URL from the
    response object. Store those headers in a global URL registry so that
    later access to that URL will know what headers to take into account
    without building the response object itself. The headers are named in the
    Vary header of the response, but we want to prevent response generation.

    The list of headers to use for cache key generation is stored in the same
    cache as the pages themselves. If the cache ages some data out of the
    cache, this just means that we have to build the response once to get at
    the Vary header and so at the list of headers to use for the cache key.
    """
    if key_prefix is None:
        key_prefix = settings.CACHE_MIDDLEWARE_KEY_PREFIX
    if cache_timeout is None:
        cache_timeout = settings.CACHE_MIDDLEWARE_SECONDS
    cache_key = _generate_cache_header_key(key_prefix, request)
    if cache is None:
        cache = caches[settings.CACHE_MIDDLEWARE_ALIAS]
    if response.has_header('Vary'):
        is_accept_language_redundant = settings.USE_I18N or settings.USE_L10N
        # If i18n or l10n are used, the generated cache key will be suffixed
        # with the current locale. Adding the raw value of Accept-Language is
        # redundant in that case and would result in storing the same content
        # under multiple keys in the cache. See #18191 for details.
        headerlist = []
        for header in cc_delim_re.split(response['Vary']):
            # Normalize to the CGI-style 'HTTP_' names used in request.META.
            header = header.upper().replace('-', '_')
            if header != 'ACCEPT_LANGUAGE' or not is_accept_language_redundant:
                headerlist.append('HTTP_' + header)
        headerlist.sort()
        cache.set(cache_key, headerlist, cache_timeout)
        return _generate_cache_key(request, request.method, headerlist, key_prefix)
    else:
        # if there is no Vary header, we still need a cache key
        # for the request.build_absolute_uri()
        cache.set(cache_key, [], cache_timeout)
        return _generate_cache_key(request, request.method, [], key_prefix)
|
||||
|
||||
|
||||
def _to_tuple(s):
|
||||
t = s.split('=', 1)
|
||||
if len(t) == 2:
|
||||
return t[0].lower(), t[1]
|
||||
return t[0].lower(), True
|
||||
61
venv/lib/python3.8/site-packages/django/utils/crypto.py
Normal file
61
venv/lib/python3.8/site-packages/django/utils/crypto.py
Normal file
@@ -0,0 +1,61 @@
|
||||
"""
|
||||
Django's standard crypto functions and utilities.
|
||||
"""
|
||||
import hashlib
|
||||
import hmac
|
||||
import secrets
|
||||
|
||||
from django.conf import settings
|
||||
from django.utils.encoding import force_bytes
|
||||
|
||||
|
||||
def salted_hmac(key_salt, value, secret=None):
    """
    Return the HMAC-SHA1 of 'value', using a key generated from key_salt and a
    secret (which defaults to settings.SECRET_KEY).

    A different key_salt should be passed in for every application of HMAC.

    Return an ``hmac`` object; call ``.digest()`` or ``.hexdigest()`` on it.
    """
    if secret is None:
        secret = settings.SECRET_KEY

    key_salt = force_bytes(key_salt)
    secret = force_bytes(secret)

    # We need to generate a derived key from our base key. We can do this by
    # passing the key_salt and our base key through a pseudo-random function and
    # SHA1 works nicely.
    key = hashlib.sha1(key_salt + secret).digest()

    # If len(key_salt + secret) > sha_constructor().block_size, the above
    # line is redundant and could be replaced by key = key_salt + secret, since
    # the hmac module does the same thing for keys longer than the block size.
    # However, we need to ensure that we *always* do this.
    return hmac.new(key, msg=force_bytes(value), digestmod=hashlib.sha1)
|
||||
|
||||
|
||||
def get_random_string(length=12,
                      allowed_chars='abcdefghijklmnopqrstuvwxyz'
                                    'ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789'):
    """
    Return a securely generated random string.

    The default length of 12 with the a-z, A-Z, 0-9 character set returns
    a 71-bit value. log_2((26+26+10)^12) =~ 71 bits
    """
    # secrets.choice draws from the OS CSPRNG, so the result is suitable
    # for security-sensitive tokens.
    chars = [secrets.choice(allowed_chars) for _ in range(length)]
    return ''.join(chars)
|
||||
|
||||
|
||||
def constant_time_compare(val1, val2):
    """Return True if the two strings are equal, False otherwise."""
    # Coerce both operands to bytes so str/bytes mixtures compare sanely,
    # then delegate to the stdlib's timing-attack-resistant comparison.
    left = force_bytes(val1)
    right = force_bytes(val2)
    return secrets.compare_digest(left, right)
|
||||
|
||||
|
||||
def pbkdf2(password, salt, iterations, dklen=0, digest=None):
    """Return the PBKDF2-HMAC hash of *password*.

    *digest* defaults to SHA-256; a *dklen* of 0 means "use the digest's
    native output length" (hashlib interprets None that way).
    """
    if digest is None:
        digest = hashlib.sha256
    return hashlib.pbkdf2_hmac(
        digest().name,
        force_bytes(password),
        force_bytes(salt),
        iterations,
        dklen or None,
    )
|
||||
339
venv/lib/python3.8/site-packages/django/utils/datastructures.py
Normal file
339
venv/lib/python3.8/site-packages/django/utils/datastructures.py
Normal file
@@ -0,0 +1,339 @@
|
||||
import copy
|
||||
from collections.abc import Mapping
|
||||
|
||||
|
||||
class OrderedSet:
    """
    A set which keeps the ordering of the inserted items.

    Backed by a dict (insertion-ordered on Python 3.7+) whose values are
    ignored; only the keys matter.
    """

    def __init__(self, iterable=None):
        self.dict = {item: None for item in (iterable or ())}

    def add(self, item):
        self.dict[item] = None

    def remove(self, item):
        # Raises KeyError when absent, mirroring set.remove().
        del self.dict[item]

    def discard(self, item):
        # Like remove(), but absence is not an error.
        self.dict.pop(item, None)

    def __iter__(self):
        return iter(self.dict)

    def __contains__(self, item):
        return item in self.dict

    def __bool__(self):
        return bool(self.dict)

    def __len__(self):
        return len(self.dict)
|
||||
|
||||
|
||||
class MultiValueDictKeyError(KeyError):
    # Raised by MultiValueDict.__getitem__() for missing keys; subclasses
    # KeyError so existing ``except KeyError`` handlers keep working.
    pass
|
||||
|
||||
|
||||
class MultiValueDict(dict):
|
||||
"""
|
||||
A subclass of dictionary customized to handle multiple values for the
|
||||
same key.
|
||||
|
||||
>>> d = MultiValueDict({'name': ['Adrian', 'Simon'], 'position': ['Developer']})
|
||||
>>> d['name']
|
||||
'Simon'
|
||||
>>> d.getlist('name')
|
||||
['Adrian', 'Simon']
|
||||
>>> d.getlist('doesnotexist')
|
||||
[]
|
||||
>>> d.getlist('doesnotexist', ['Adrian', 'Simon'])
|
||||
['Adrian', 'Simon']
|
||||
>>> d.get('lastname', 'nonexistent')
|
||||
'nonexistent'
|
||||
>>> d.setlist('lastname', ['Holovaty', 'Willison'])
|
||||
|
||||
This class exists to solve the irritating problem raised by cgi.parse_qs,
|
||||
which returns a list for every key, even though most Web forms submit
|
||||
single name-value pairs.
|
||||
"""
|
||||
def __init__(self, key_to_list_mapping=()):
|
||||
super().__init__(key_to_list_mapping)
|
||||
|
||||
def __repr__(self):
|
||||
return "<%s: %s>" % (self.__class__.__name__, super().__repr__())
|
||||
|
||||
def __getitem__(self, key):
|
||||
"""
|
||||
Return the last data value for this key, or [] if it's an empty list;
|
||||
raise KeyError if not found.
|
||||
"""
|
||||
try:
|
||||
list_ = super().__getitem__(key)
|
||||
except KeyError:
|
||||
raise MultiValueDictKeyError(key)
|
||||
try:
|
||||
return list_[-1]
|
||||
except IndexError:
|
||||
return []
|
||||
|
||||
def __setitem__(self, key, value):
|
||||
super().__setitem__(key, [value])
|
||||
|
||||
def __copy__(self):
|
||||
return self.__class__([
|
||||
(k, v[:])
|
||||
for k, v in self.lists()
|
||||
])
|
||||
|
||||
def __deepcopy__(self, memo):
|
||||
result = self.__class__()
|
||||
memo[id(self)] = result
|
||||
for key, value in dict.items(self):
|
||||
dict.__setitem__(result, copy.deepcopy(key, memo),
|
||||
copy.deepcopy(value, memo))
|
||||
return result
|
||||
|
||||
def __getstate__(self):
|
||||
return {**self.__dict__, '_data': {k: self._getlist(k) for k in self}}
|
||||
|
||||
def __setstate__(self, obj_dict):
|
||||
data = obj_dict.pop('_data', {})
|
||||
for k, v in data.items():
|
||||
self.setlist(k, v)
|
||||
self.__dict__.update(obj_dict)
|
||||
|
||||
def get(self, key, default=None):
|
||||
"""
|
||||
Return the last data value for the passed key. If key doesn't exist
|
||||
or value is an empty list, return `default`.
|
||||
"""
|
||||
try:
|
||||
val = self[key]
|
||||
except KeyError:
|
||||
return default
|
||||
if val == []:
|
||||
return default
|
||||
return val
|
||||
|
||||
def _getlist(self, key, default=None, force_list=False):
|
||||
"""
|
||||
Return a list of values for the key.
|
||||
|
||||
Used internally to manipulate values list. If force_list is True,
|
||||
return a new copy of values.
|
||||
"""
|
||||
try:
|
||||
values = super().__getitem__(key)
|
||||
except KeyError:
|
||||
if default is None:
|
||||
return []
|
||||
return default
|
||||
else:
|
||||
if force_list:
|
||||
values = list(values) if values is not None else None
|
||||
return values
|
||||
|
||||
def getlist(self, key, default=None):
|
||||
"""
|
||||
Return the list of values for the key. If key doesn't exist, return a
|
||||
default value.
|
||||
"""
|
||||
return self._getlist(key, default, force_list=True)
|
||||
|
||||
def setlist(self, key, list_):
|
||||
super().__setitem__(key, list_)
|
||||
|
||||
def setdefault(self, key, default=None):
|
||||
if key not in self:
|
||||
self[key] = default
|
||||
# Do not return default here because __setitem__() may store
|
||||
# another value -- QueryDict.__setitem__() does. Look it up.
|
||||
return self[key]
|
||||
|
||||
def setlistdefault(self, key, default_list=None):
|
||||
if key not in self:
|
||||
if default_list is None:
|
||||
default_list = []
|
||||
self.setlist(key, default_list)
|
||||
# Do not return default_list here because setlist() may store
|
||||
# another value -- QueryDict.setlist() does. Look it up.
|
||||
return self._getlist(key)
|
||||
|
||||
def appendlist(self, key, value):
|
||||
"""Append an item to the internal list associated with key."""
|
||||
self.setlistdefault(key).append(value)
|
||||
|
||||
def items(self):
|
||||
"""
|
||||
Yield (key, value) pairs, where value is the last item in the list
|
||||
associated with the key.
|
||||
"""
|
||||
for key in self:
|
||||
yield key, self[key]
|
||||
|
||||
def lists(self):
|
||||
"""Yield (key, list) pairs."""
|
||||
return iter(super().items())
|
||||
|
||||
def values(self):
|
||||
"""Yield the last value on every key list."""
|
||||
for key in self:
|
||||
yield self[key]
|
||||
|
||||
def copy(self):
|
||||
"""Return a shallow copy of this object."""
|
||||
return copy.copy(self)
|
||||
|
||||
def update(self, *args, **kwargs):
    """Extend rather than replace existing key lists."""
    if len(args) > 1:
        raise TypeError("update expected at most 1 argument, got %d" % len(args))
    if args:
        other = args[0]
        if isinstance(other, MultiValueDict):
            # Merge whole value lists key by key.
            for key, value_list in other.lists():
                self.setlistdefault(key).extend(value_list)
        else:
            try:
                for key, value in other.items():
                    self.setlistdefault(key).append(value)
            except TypeError:
                raise ValueError("MultiValueDict.update() takes either a MultiValueDict or dictionary")
    for key, value in kwargs.items():
        self.setlistdefault(key).append(value)
|
||||
|
||||
def dict(self):
    """Return current object as a dict with singular values."""
    result = {}
    for key in self:
        result[key] = self[key]
    return result
|
||||
|
||||
|
||||
class ImmutableList(tuple):
    """
    A tuple-like object that raises useful errors when it is asked to mutate.

    Example::

        >>> a = ImmutableList(range(5), warning="You cannot mutate this.")
        >>> a[3] = '4'
        Traceback (most recent call last):
        ...
        AttributeError: You cannot mutate this.
    """

    def __new__(cls, *args, warning='ImmutableList object is immutable.', **kwargs):
        instance = tuple.__new__(cls, *args, **kwargs)
        instance.warning = warning
        return instance

    def complain(self, *args, **kwargs):
        # ``warning`` may be a ready-made exception or just a message.
        if isinstance(self.warning, Exception):
            raise self.warning
        raise AttributeError(self.warning)

    # All list mutation functions complain.
    __delitem__ = complain
    __delslice__ = complain
    __iadd__ = complain
    __imul__ = complain
    __setitem__ = complain
    __setslice__ = complain
    append = complain
    extend = complain
    insert = complain
    pop = complain
    remove = complain
    sort = complain
    reverse = complain
|
||||
|
||||
|
||||
class DictWrapper(dict):
    """
    Wrap accesses to a dictionary so that certain values (those starting with
    the specified prefix) are passed through a function before being returned.
    The prefix is removed before looking up the real value.

    Used by the SQL construction code to ensure that values are correctly
    quoted before being used.
    """
    def __init__(self, data, func, prefix):
        super().__init__(data)
        self.func = func
        self.prefix = prefix

    def __getitem__(self, key):
        """
        Retrieve the real value after stripping the prefix string (if
        present). If the prefix is present, pass the value through self.func
        before returning, otherwise return the raw value.
        """
        prefix = self.prefix
        if key.startswith(prefix):
            raw = super().__getitem__(key[len(prefix):])
            return self.func(raw)
        return super().__getitem__(key)
|
||||
|
||||
|
||||
def _destruct_iterable_mapping_values(data):
|
||||
for i, elem in enumerate(data):
|
||||
if len(elem) != 2:
|
||||
raise ValueError(
|
||||
'dictionary update sequence element #{} has '
|
||||
'length {}; 2 is required.'.format(i, len(elem))
|
||||
)
|
||||
if not isinstance(elem[0], str):
|
||||
raise ValueError('Element key %r invalid, only strings are allowed' % elem[0])
|
||||
yield tuple(elem)
|
||||
|
||||
|
||||
class CaseInsensitiveMapping(Mapping):
    """
    Mapping allowing case-insensitive key lookups. Original case of keys is
    preserved for iteration and string representation.

    Example::

        >>> ci_map = CaseInsensitiveMapping({'name': 'Jane'})
        >>> ci_map['Name']
        Jane
        >>> ci_map['NAME']
        Jane
        >>> ci_map['name']
        Jane
        >>> ci_map  # original case preserved
        {'name': 'Jane'}
    """

    def __init__(self, data):
        if not isinstance(data, Mapping):
            data = dict(_destruct_iterable_mapping_values(data))
        # Store lowercased key -> (original key, value).
        self._store = {key.lower(): (key, value) for key, value in data.items()}

    def __getitem__(self, key):
        original_key, value = self._store[key.lower()]
        return value

    def __len__(self):
        return len(self._store)

    def __eq__(self, other):
        if not isinstance(other, Mapping):
            return False
        return (
            {k.lower(): v for k, v in self.items()} ==
            {k.lower(): v for k, v in other.items()}
        )

    def __iter__(self):
        for original_key, value in self._store.values():
            yield original_key

    def __repr__(self):
        return repr(dict(self._store.values()))

    def copy(self):
        # The mapping is read-only, so a "copy" can be the object itself.
        return self
|
||||
367
venv/lib/python3.8/site-packages/django/utils/dateformat.py
Normal file
367
venv/lib/python3.8/site-packages/django/utils/dateformat.py
Normal file
@@ -0,0 +1,367 @@
|
||||
"""
|
||||
PHP date() style date formatting
|
||||
See http://www.php.net/date for format strings
|
||||
|
||||
Usage:
|
||||
>>> import datetime
|
||||
>>> d = datetime.datetime.now()
|
||||
>>> df = DateFormat(d)
|
||||
>>> print(df.format('jS F Y H:i'))
|
||||
7th October 2003 11:39
|
||||
>>>
|
||||
"""
|
||||
import calendar
|
||||
import datetime
|
||||
import re
|
||||
import time
|
||||
|
||||
from django.utils.dates import (
|
||||
MONTHS, MONTHS_3, MONTHS_ALT, MONTHS_AP, WEEKDAYS, WEEKDAYS_ABBR,
|
||||
)
|
||||
from django.utils.timezone import get_default_timezone, is_aware, is_naive
|
||||
from django.utils.translation import gettext as _
|
||||
|
||||
re_formatchars = re.compile(r'(?<!\\)([aAbBcdDeEfFgGhHiIjlLmMnNoOPrsStTUuwWyYzZ])')
|
||||
re_escaped = re.compile(r'\\(.)')
|
||||
|
||||
|
||||
class Formatter:
    """Base class implementing PHP date()-style format-string expansion."""

    def format(self, formatstr):
        """
        Expand ``formatstr``: each unescaped format character is replaced by
        the result of calling the method of the same name on ``self``; other
        text passes through with backslash escapes removed.
        """
        pieces = []
        # re_formatchars.split() alternates literal text (even indexes) with
        # single format characters (odd indexes).
        for i, piece in enumerate(re_formatchars.split(str(formatstr))):
            if i % 2:
                # Plain dates have no time component, so reject any specifier
                # that TimeFormat defines.
                if type(self.data) is datetime.date and hasattr(TimeFormat, piece):
                    raise TypeError(
                        "The format for date objects may not contain "
                        "time-related format specifiers (found '%s')." % piece
                    )
                pieces.append(str(getattr(self, piece)()))
            elif piece:
                pieces.append(re_escaped.sub(r'\1', piece))
        return ''.join(pieces)
|
||||
|
||||
|
||||
class TimeFormat(Formatter):
    """Implement the time-related PHP date() format specifiers for a
    time/date/datetime object."""

    def __init__(self, obj):
        # ``obj`` may be a time, date or datetime; the format methods read
        # whichever fields they need from it.
        self.data = obj
        self.timezone = None

        # We only support timezone when formatting datetime objects,
        # not date objects (timezone information not appropriate),
        # or time objects (against established django policy).
        if isinstance(obj, datetime.datetime):
            if is_naive(obj):
                self.timezone = get_default_timezone()
            else:
                self.timezone = obj.tzinfo

    def a(self):
        "'a.m.' or 'p.m.'"
        if self.data.hour > 11:
            return _('p.m.')
        return _('a.m.')

    def A(self):
        "'AM' or 'PM'"
        if self.data.hour > 11:
            return _('PM')
        return _('AM')

    def B(self):
        "Swatch Internet time"
        raise NotImplementedError('may be implemented in a future release')

    def e(self):
        """
        Timezone name.

        If timezone information is not available, return an empty string.
        """
        if not self.timezone:
            return ""

        try:
            if hasattr(self.data, 'tzinfo') and self.data.tzinfo:
                return self.data.tzname() or ''
        except NotImplementedError:
            pass
        return ""

    def f(self):
        """
        Time, in 12-hour hours and minutes, with minutes left off if they're
        zero.
        Examples: '1', '1:30', '2:05', '2'
        Proprietary extension.
        """
        if self.data.minute == 0:
            return self.g()
        return '%s:%s' % (self.g(), self.i())

    def g(self):
        "Hour, 12-hour format without leading zeros; i.e. '1' to '12'"
        if self.data.hour == 0:
            return 12
        if self.data.hour > 12:
            return self.data.hour - 12
        return self.data.hour

    def G(self):
        "Hour, 24-hour format without leading zeros; i.e. '0' to '23'"
        return self.data.hour

    def h(self):
        "Hour, 12-hour format; i.e. '01' to '12'"
        return '%02d' % self.g()

    def H(self):
        "Hour, 24-hour format; i.e. '00' to '23'"
        return '%02d' % self.G()

    def i(self):
        "Minutes; i.e. '00' to '59'"
        return '%02d' % self.data.minute

    def O(self):  # NOQA: E743
        """
        Difference to Greenwich time in hours; e.g. '+0200', '-0430'.

        If timezone information is not available, return an empty string.
        """
        if not self.timezone:
            return ""

        seconds = self.Z()
        if seconds == "":
            return ""
        sign = '-' if seconds < 0 else '+'
        seconds = abs(seconds)
        # Render the absolute offset as HHMM.
        return "%s%02d%02d" % (sign, seconds // 3600, (seconds // 60) % 60)

    def P(self):
        """
        Time, in 12-hour hours, minutes and 'a.m.'/'p.m.', with minutes left off
        if they're zero and the strings 'midnight' and 'noon' if appropriate.
        Examples: '1 a.m.', '1:30 p.m.', 'midnight', 'noon', '12:30 p.m.'
        Proprietary extension.
        """
        if self.data.minute == 0 and self.data.hour == 0:
            return _('midnight')
        if self.data.minute == 0 and self.data.hour == 12:
            return _('noon')
        return '%s %s' % (self.f(), self.a())

    def s(self):
        "Seconds; i.e. '00' to '59'"
        return '%02d' % self.data.second

    def T(self):
        """
        Time zone of this machine; e.g. 'EST' or 'MDT'.

        If timezone information is not available, return an empty string.
        """
        if not self.timezone:
            return ""

        name = None
        try:
            name = self.timezone.tzname(self.data)
        except Exception:
            # pytz raises AmbiguousTimeError during the autumn DST change.
            # This happens mainly when __init__ receives a naive datetime
            # and sets self.timezone = get_default_timezone().
            pass
        if name is None:
            # Fall back to the numeric UTC offset (e.g. '+0200').
            name = self.format('O')
        return str(name)

    def u(self):
        "Microseconds; i.e. '000000' to '999999'"
        return '%06d' % self.data.microsecond

    def Z(self):
        """
        Time zone offset in seconds (i.e. '-43200' to '43200'). The offset for
        timezones west of UTC is always negative, and for those east of UTC is
        always positive.

        If timezone information is not available, return an empty string.
        """
        if not self.timezone:
            return ""

        try:
            offset = self.timezone.utcoffset(self.data)
        except Exception:
            # pytz raises AmbiguousTimeError during the autumn DST change.
            # This happens mainly when __init__ receives a naive datetime
            # and sets self.timezone = get_default_timezone().
            return ""

        # `offset` is a datetime.timedelta. For negative values (to the west of
        # UTC) only days can be negative (days=-1) and seconds are always
        # positive. e.g. UTC-1 -> timedelta(days=-1, seconds=82800, microseconds=0)
        # Positive offsets have days=0
        return offset.days * 86400 + offset.seconds
|
||||
|
||||
|
||||
class DateFormat(TimeFormat):
    """Implement the date-related PHP date() format specifiers."""

    # Cumulative day count at the start of each month (index 1-12) for a
    # non-leap year; used by z().
    year_days = [None, 0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334]

    def b(self):
        "Month, textual, 3 letters, lowercase; e.g. 'jan'"
        return MONTHS_3[self.data.month]

    def c(self):
        """
        ISO 8601 Format
        Example : '2008-01-02T10:30:00.000123'
        """
        return self.data.isoformat()

    def d(self):
        "Day of the month, 2 digits with leading zeros; i.e. '01' to '31'"
        return '%02d' % self.data.day

    def D(self):
        "Day of the week, textual, 3 letters; e.g. 'Fri'"
        return WEEKDAYS_ABBR[self.data.weekday()]

    def E(self):
        "Alternative month names as required by some locales. Proprietary extension."
        return MONTHS_ALT[self.data.month]

    def F(self):
        "Month, textual, long; e.g. 'January'"
        return MONTHS[self.data.month]

    def I(self):  # NOQA: E743
        "'1' if Daylight Savings Time, '0' otherwise."
        try:
            if self.timezone and self.timezone.dst(self.data):
                return '1'
            else:
                return '0'
        except Exception:
            # pytz raises AmbiguousTimeError during the autumn DST change.
            # This happens mainly when __init__ receives a naive datetime
            # and sets self.timezone = get_default_timezone().
            return ''

    def j(self):
        "Day of the month without leading zeros; i.e. '1' to '31'"
        return self.data.day

    def l(self):  # NOQA: E743
        "Day of the week, textual, long; e.g. 'Friday'"
        return WEEKDAYS[self.data.weekday()]

    def L(self):
        "Boolean for whether it is a leap year; i.e. True or False"
        return calendar.isleap(self.data.year)

    def m(self):
        "Month; i.e. '01' to '12'"
        return '%02d' % self.data.month

    def M(self):
        "Month, textual, 3 letters; e.g. 'Jan'"
        return MONTHS_3[self.data.month].title()

    def n(self):
        "Month without leading zeros; i.e. '1' to '12'"
        return self.data.month

    def N(self):
        "Month abbreviation in Associated Press style. Proprietary extension."
        return MONTHS_AP[self.data.month]

    def o(self):
        "ISO 8601 year number matching the ISO week number (W)"
        return self.data.isocalendar()[0]

    def r(self):
        "RFC 5322 formatted date; e.g. 'Thu, 21 Dec 2000 16:01:07 +0200'"
        return self.format('D, j M Y H:i:s O')

    def S(self):
        "English ordinal suffix for the day of the month, 2 characters; i.e. 'st', 'nd', 'rd' or 'th'"
        if self.data.day in (11, 12, 13):  # Special case
            return 'th'
        last = self.data.day % 10
        if last == 1:
            return 'st'
        if last == 2:
            return 'nd'
        if last == 3:
            return 'rd'
        return 'th'

    def t(self):
        "Number of days in the given month; i.e. '28' to '31'"
        return '%02d' % calendar.monthrange(self.data.year, self.data.month)[1]

    def U(self):
        "Seconds since the Unix epoch (January 1 1970 00:00:00 GMT)"
        if isinstance(self.data, datetime.datetime) and is_aware(self.data):
            return int(calendar.timegm(self.data.utctimetuple()))
        else:
            return int(time.mktime(self.data.timetuple()))

    def w(self):
        "Day of the week, numeric, i.e. '0' (Sunday) to '6' (Saturday)"
        return (self.data.weekday() + 1) % 7

    def W(self):
        "ISO-8601 week number of year, weeks starting on Monday"
        # Algorithm from http://www.personal.ecu.edu/mccartyr/ISOwdALG.txt
        jan1_weekday = self.data.replace(month=1, day=1).weekday() + 1
        weekday = self.data.weekday() + 1
        day_of_year = self.z()
        if day_of_year <= (8 - jan1_weekday) and jan1_weekday > 4:
            # The day belongs to the last ISO week of the previous year.
            if jan1_weekday == 5 or (jan1_weekday == 6 and calendar.isleap(self.data.year - 1)):
                week_number = 53
            else:
                week_number = 52
        else:
            if calendar.isleap(self.data.year):
                i = 366
            else:
                i = 365
            if (i - day_of_year) < (4 - weekday):
                # The day belongs to the first ISO week of the next year.
                week_number = 1
            else:
                j = day_of_year + (7 - weekday) + (jan1_weekday - 1)
                week_number = j // 7
                if jan1_weekday > 4:
                    week_number -= 1
        return week_number

    def y(self):
        "Year, 2 digits; e.g. '99'"
        # Use modular arithmetic instead of string slicing: str(5)[2:] would
        # return '' for years < 1000, while 5 % 100 -> '05'. Identical output
        # for 4-digit years.
        return '%02d' % (self.data.year % 100)

    def Y(self):
        "Year, 4 digits; e.g. '1999'"
        return self.data.year

    def z(self):
        """Day of the year, i.e. 1 to 366."""
        doy = self.year_days[self.data.month] + self.data.day
        if self.L() and self.data.month > 2:
            doy += 1
        return doy
|
||||
|
||||
|
||||
def format(value, format_string):
    """Convenience wrapper: format ``value`` with a date format string."""
    return DateFormat(value).format(format_string)
|
||||
|
||||
|
||||
def time_format(value, format_string):
    """Convenience wrapper: format ``value`` with a time format string."""
    return TimeFormat(value).format(format_string)
|
||||
147
venv/lib/python3.8/site-packages/django/utils/dateparse.py
Normal file
147
venv/lib/python3.8/site-packages/django/utils/dateparse.py
Normal file
@@ -0,0 +1,147 @@
|
||||
"""Functions to parse datetime objects."""
|
||||
|
||||
# We're using regular expressions rather than time.strptime because:
|
||||
# - They provide both validation and parsing.
|
||||
# - They're more flexible for datetimes.
|
||||
# - The date/datetime/time constructors produce friendlier error messages.
|
||||
|
||||
import datetime
|
||||
import re
|
||||
|
||||
from django.utils.timezone import get_fixed_timezone, utc
|
||||
|
||||
date_re = re.compile(
|
||||
r'(?P<year>\d{4})-(?P<month>\d{1,2})-(?P<day>\d{1,2})$'
|
||||
)
|
||||
|
||||
time_re = re.compile(
|
||||
r'(?P<hour>\d{1,2}):(?P<minute>\d{1,2})'
|
||||
r'(?::(?P<second>\d{1,2})(?:\.(?P<microsecond>\d{1,6})\d{0,6})?)?'
|
||||
)
|
||||
|
||||
datetime_re = re.compile(
|
||||
r'(?P<year>\d{4})-(?P<month>\d{1,2})-(?P<day>\d{1,2})'
|
||||
r'[T ](?P<hour>\d{1,2}):(?P<minute>\d{1,2})'
|
||||
r'(?::(?P<second>\d{1,2})(?:\.(?P<microsecond>\d{1,6})\d{0,6})?)?'
|
||||
r'(?P<tzinfo>Z|[+-]\d{2}(?::?\d{2})?)?$'
|
||||
)
|
||||
|
||||
standard_duration_re = re.compile(
|
||||
r'^'
|
||||
r'(?:(?P<days>-?\d+) (days?, )?)?'
|
||||
r'(?P<sign>-?)'
|
||||
r'((?:(?P<hours>\d+):)(?=\d+:\d+))?'
|
||||
r'(?:(?P<minutes>\d+):)?'
|
||||
r'(?P<seconds>\d+)'
|
||||
r'(?:\.(?P<microseconds>\d{1,6})\d{0,6})?'
|
||||
r'$'
|
||||
)
|
||||
|
||||
# Support the sections of ISO 8601 date representation that are accepted by
|
||||
# timedelta
|
||||
iso8601_duration_re = re.compile(
|
||||
r'^(?P<sign>[-+]?)'
|
||||
r'P'
|
||||
r'(?:(?P<days>\d+(.\d+)?)D)?'
|
||||
r'(?:T'
|
||||
r'(?:(?P<hours>\d+(.\d+)?)H)?'
|
||||
r'(?:(?P<minutes>\d+(.\d+)?)M)?'
|
||||
r'(?:(?P<seconds>\d+(.\d+)?)S)?'
|
||||
r')?'
|
||||
r'$'
|
||||
)
|
||||
|
||||
# Support PostgreSQL's day-time interval format, e.g. "3 days 04:05:06". The
|
||||
# year-month and mixed intervals cannot be converted to a timedelta and thus
|
||||
# aren't accepted.
|
||||
postgres_interval_re = re.compile(
|
||||
r'^'
|
||||
r'(?:(?P<days>-?\d+) (days? ?))?'
|
||||
r'(?:(?P<sign>[-+])?'
|
||||
r'(?P<hours>\d+):'
|
||||
r'(?P<minutes>\d\d):'
|
||||
r'(?P<seconds>\d\d)'
|
||||
r'(?:\.(?P<microseconds>\d{1,6}))?'
|
||||
r')?$'
|
||||
)
|
||||
|
||||
|
||||
def parse_date(value):
    """Parse a string and return a datetime.date.

    Raise ValueError if the input is well formatted but not a valid date.
    Return None if the input isn't well formatted.
    """
    match = date_re.match(value)
    if match is None:
        return None
    parts = {name: int(text) for name, text in match.groupdict().items()}
    return datetime.date(**parts)
|
||||
|
||||
|
||||
def parse_time(value):
    """Parse a string and return a datetime.time.

    This function doesn't support time zone offsets.

    Raise ValueError if the input is well formatted but not a valid time.
    Return None if the input isn't well formatted, in particular if it
    contains an offset.
    """
    match = time_re.match(value)
    if match is None:
        return None
    groups = match.groupdict()
    # Pad fractional seconds out to microseconds ('.5' -> '500000').
    if groups['microsecond']:
        groups['microsecond'] = groups['microsecond'].ljust(6, '0')
    return datetime.time(**{k: int(v) for k, v in groups.items() if v is not None})
|
||||
|
||||
|
||||
def parse_datetime(value):
    """Parse a string and return a datetime.datetime.

    This function supports time zone offsets. When the input contains one,
    the output uses a timezone with a fixed offset from UTC.

    Raise ValueError if the input is well formatted but not a valid datetime.
    Return None if the input isn't well formatted.
    """
    match = datetime_re.match(value)
    if match:
        kw = match.groupdict()
        # Pad fractional seconds out to microseconds ('.5' -> '500000').
        kw['microsecond'] = kw['microsecond'] and kw['microsecond'].ljust(6, '0')
        tzinfo = kw.pop('tzinfo')
        if tzinfo == 'Z':
            tzinfo = utc
        elif tzinfo is not None:
            # The offset looks like '+HH', '+HHMM' or '+HH:MM'; minutes are
            # the last two characters only when more than the hours are given.
            offset_mins = int(tzinfo[-2:]) if len(tzinfo) > 3 else 0
            offset = 60 * int(tzinfo[1:3]) + offset_mins
            if tzinfo[0] == '-':
                offset = -offset
            tzinfo = get_fixed_timezone(offset)
        kw = {k: int(v) for k, v in kw.items() if v is not None}
        kw['tzinfo'] = tzinfo
        return datetime.datetime(**kw)
|
||||
|
||||
|
||||
def parse_duration(value):
    """Parse a duration string and return a datetime.timedelta.

    The preferred format for durations in Django is '%d %H:%M:%S.%f'.

    Also supports ISO 8601 representation and PostgreSQL's day-time interval
    format.

    Return None if the input doesn't match any supported format.
    """
    match = (
        standard_duration_re.match(value) or
        iso8601_duration_re.match(value) or
        postgres_interval_re.match(value)
    )
    if match:
        kw = match.groupdict()
        sign = -1 if kw.pop('sign', '+') == '-' else 1
        days = datetime.timedelta(float(kw.pop('days', 0) or 0))
        if match.re == iso8601_duration_re:
            # In ISO 8601 the sign precedes the whole designator (e.g.
            # '-P1DT1H'), so it must be applied to the day component too;
            # previously '-P1D' incorrectly parsed as +1 day.
            days *= sign
        # Pad fractional seconds out to microseconds ('.5' -> '500000').
        if kw.get('microseconds'):
            kw['microseconds'] = kw['microseconds'].ljust(6, '0')
        # A negative seconds component drags its fraction negative with it.
        if kw.get('seconds') and kw.get('microseconds') and kw['seconds'].startswith('-'):
            kw['microseconds'] = '-' + kw['microseconds']
        kw = {k: float(v) for k, v in kw.items() if v is not None}
        return days + sign * datetime.timedelta(**kw)
|
||||
49
venv/lib/python3.8/site-packages/django/utils/dates.py
Normal file
49
venv/lib/python3.8/site-packages/django/utils/dates.py
Normal file
@@ -0,0 +1,49 @@
|
||||
"Commonly-used date structures"
|
||||
|
||||
from django.utils.translation import gettext_lazy as _, pgettext_lazy
|
||||
|
||||
# Translatable day/month name tables. WEEKDAYS* are keyed by
# datetime.weekday() numbering (0 = Monday); MONTHS* by month number (1-12).
WEEKDAYS = {
    0: _('Monday'), 1: _('Tuesday'), 2: _('Wednesday'), 3: _('Thursday'), 4: _('Friday'),
    5: _('Saturday'), 6: _('Sunday')
}
WEEKDAYS_ABBR = {
    0: _('Mon'), 1: _('Tue'), 2: _('Wed'), 3: _('Thu'), 4: _('Fri'),
    5: _('Sat'), 6: _('Sun')
}
MONTHS = {
    1: _('January'), 2: _('February'), 3: _('March'), 4: _('April'), 5: _('May'), 6: _('June'),
    7: _('July'), 8: _('August'), 9: _('September'), 10: _('October'), 11: _('November'),
    12: _('December')
}
MONTHS_3 = {
    1: _('jan'), 2: _('feb'), 3: _('mar'), 4: _('apr'), 5: _('may'), 6: _('jun'),
    7: _('jul'), 8: _('aug'), 9: _('sep'), 10: _('oct'), 11: _('nov'), 12: _('dec')
}
MONTHS_AP = {  # month names in Associated Press style
    1: pgettext_lazy('abbrev. month', 'Jan.'),
    2: pgettext_lazy('abbrev. month', 'Feb.'),
    3: pgettext_lazy('abbrev. month', 'March'),
    4: pgettext_lazy('abbrev. month', 'April'),
    5: pgettext_lazy('abbrev. month', 'May'),
    6: pgettext_lazy('abbrev. month', 'June'),
    7: pgettext_lazy('abbrev. month', 'July'),
    8: pgettext_lazy('abbrev. month', 'Aug.'),
    9: pgettext_lazy('abbrev. month', 'Sept.'),
    10: pgettext_lazy('abbrev. month', 'Oct.'),
    11: pgettext_lazy('abbrev. month', 'Nov.'),
    12: pgettext_lazy('abbrev. month', 'Dec.')
}
MONTHS_ALT = {  # required for long date representation by some locales
    1: pgettext_lazy('alt. month', 'January'),
    2: pgettext_lazy('alt. month', 'February'),
    3: pgettext_lazy('alt. month', 'March'),
    4: pgettext_lazy('alt. month', 'April'),
    5: pgettext_lazy('alt. month', 'May'),
    6: pgettext_lazy('alt. month', 'June'),
    7: pgettext_lazy('alt. month', 'July'),
    8: pgettext_lazy('alt. month', 'August'),
    9: pgettext_lazy('alt. month', 'September'),
    10: pgettext_lazy('alt. month', 'October'),
    11: pgettext_lazy('alt. month', 'November'),
    12: pgettext_lazy('alt. month', 'December')
}
|
||||
105
venv/lib/python3.8/site-packages/django/utils/datetime_safe.py
Normal file
105
venv/lib/python3.8/site-packages/django/utils/datetime_safe.py
Normal file
@@ -0,0 +1,105 @@
|
||||
# These classes override date and datetime to ensure that strftime('%Y')
|
||||
# returns four digits (with leading zeros) on years < 1000.
|
||||
# https://bugs.python.org/issue13305
|
||||
#
|
||||
# Based on code submitted to comp.lang.python by Andrew Dalke
|
||||
#
|
||||
# >>> datetime_safe.date(10, 8, 2).strftime("%Y/%m/%d was a %A")
|
||||
# '0010/08/02 was a Monday'
|
||||
|
||||
import re
|
||||
import time as ttime
|
||||
from datetime import (
|
||||
date as real_date, datetime as real_datetime, time as real_time,
|
||||
)
|
||||
|
||||
|
||||
class date(real_date):
    """A datetime.date whose strftime() zero-pads years below 1000."""
    def strftime(self, fmt):
        return strftime(self, fmt)
|
||||
|
||||
|
||||
class datetime(real_datetime):
    """A datetime.datetime whose strftime() zero-pads years below 1000."""
    def strftime(self, fmt):
        return strftime(self, fmt)

    @classmethod
    def combine(cls, date, time):
        # Return the safe subclass rather than the stdlib type.
        return cls(date.year, date.month, date.day,
                   time.hour, time.minute, time.second,
                   time.microsecond, time.tzinfo)

    def date(self):
        # Return this module's safe date subclass.
        return date(self.year, self.month, self.day)
|
||||
|
||||
|
||||
class time(real_time):
    # No override needed: a time has no year component, so stdlib strftime()
    # behaves correctly.
    pass
|
||||
|
||||
|
||||
def new_date(d):
    "Generate a safe date from a datetime.date object."
    # Rebuild as this module's date subclass so strftime() handles year < 1000.
    return date(d.year, d.month, d.day)
|
||||
|
||||
|
||||
def new_datetime(d):
    """
    Generate a safe datetime from a datetime.date or datetime.datetime object.
    """
    args = [d.year, d.month, d.day]
    if isinstance(d, real_datetime):
        # Carry over the time/tz fields only when the source has them.
        args += [d.hour, d.minute, d.second, d.microsecond, d.tzinfo]
    return datetime(*args)
|
||||
|
||||
|
||||
# This library does not support strftime's "%s" or "%y" format strings.
|
||||
# Allowed if there's an even number of "%"s because they are escaped.
|
||||
_illegal_formatting = re.compile(r"((^|[^%])(%%)*%[sy])")
|
||||
|
||||
|
||||
def _findall(text, substr):
|
||||
# Also finds overlaps
|
||||
sites = []
|
||||
i = 0
|
||||
while True:
|
||||
i = text.find(substr, i)
|
||||
if i == -1:
|
||||
break
|
||||
sites.append(i)
|
||||
i += 1
|
||||
return sites
|
||||
|
||||
|
||||
def strftime(dt, fmt):
    """
    strftime() replacement for dates/datetimes with year < 1000.

    Formats the value in an equivalent year inside the Gregorian calendar's
    28-year repeat cycle near 2000, then substitutes the real year back into
    the output.
    """
    # Years >= 1000 are handled correctly by the stdlib implementation.
    if dt.year >= 1000:
        return super(type(dt), dt).strftime(fmt)
    illegal_formatting = _illegal_formatting.search(fmt)
    if illegal_formatting:
        raise TypeError("strftime of dates before 1000 does not handle " + illegal_formatting.group(0))

    year = dt.year
    # For every non-leap year century, advance by
    # 6 years to get into the 28-year repeat cycle
    delta = 2000 - year
    off = 6 * (delta // 100 + delta // 400)
    year = year + off

    # Move to around the year 2000
    year = year + ((2000 - year) // 28) * 28
    timetuple = dt.timetuple()
    s1 = ttime.strftime(fmt, (year,) + timetuple[1:])
    sites1 = _findall(s1, str(year))

    # Format a second time 28 years later; positions where the year string
    # appears in both outputs are genuine year fields rather than
    # coincidental digit runs.
    s2 = ttime.strftime(fmt, (year + 28,) + timetuple[1:])
    sites2 = _findall(s2, str(year + 28))

    sites = []
    for site in sites1:
        if site in sites2:
            sites.append(site)

    # Splice the real (zero-padded) year back in at each confirmed site.
    s = s1
    syear = "%04d" % (dt.year,)
    for site in sites:
        s = s[:site] + syear + s[site + 4:]
    return s
|
||||
55
venv/lib/python3.8/site-packages/django/utils/deconstruct.py
Normal file
55
venv/lib/python3.8/site-packages/django/utils/deconstruct.py
Normal file
@@ -0,0 +1,55 @@
|
||||
from importlib import import_module
|
||||
|
||||
from django.utils.version import get_docs_version
|
||||
|
||||
|
||||
def deconstructible(*args, path=None):
    """
    Class decorator that allows the decorated class to be serialized
    by the migrations subsystem.

    The `path` kwarg specifies the import path.

    May be used bare (``@deconstructible``) or called with arguments
    (``@deconstructible(path='pkg.Class')``).
    """
    def decorator(klass):
        def __new__(cls, *args, **kwargs):
            # We capture the arguments to make returning them trivial
            obj = super(klass, cls).__new__(cls)
            obj._constructor_args = (args, kwargs)
            return obj

        def deconstruct(obj):
            """
            Return a 3-tuple of class import path, positional arguments,
            and keyword arguments.
            """
            # Fallback version
            if path:
                module_name, _, name = path.rpartition('.')
            else:
                module_name = obj.__module__
                name = obj.__class__.__name__
            # Make sure it's actually there and not an inner class
            module = import_module(module_name)
            if not hasattr(module, name):
                raise ValueError(
                    "Could not find object %s in %s.\n"
                    "Please note that you cannot serialize things like inner "
                    "classes. Please move the object into the main module "
                    "body to use migrations.\n"
                    "For more information, see "
                    "https://docs.djangoproject.com/en/%s/topics/migrations/#serializing-values"
                    % (name, module_name, get_docs_version()))
            return (
                path or '%s.%s' % (obj.__class__.__module__, name),
                obj._constructor_args[0],
                obj._constructor_args[1],
            )

        klass.__new__ = staticmethod(__new__)
        klass.deconstruct = deconstruct

        return klass

    # Bare usage passes the class as the single positional argument;
    # parenthesized usage passes no positional args and returns the decorator.
    if not args:
        return decorator
    return decorator(*args)
|
||||
164
venv/lib/python3.8/site-packages/django/utils/decorators.py
Normal file
164
venv/lib/python3.8/site-packages/django/utils/decorators.py
Normal file
@@ -0,0 +1,164 @@
|
||||
"Functions that help with dynamically creating decorators for views."
|
||||
|
||||
from functools import partial, update_wrapper, wraps
|
||||
|
||||
|
||||
class classonlymethod(classmethod):
    """Like ``classmethod``, but raise when accessed through an instance."""
    def __get__(self, instance, cls=None):
        if instance is None:
            return super().__get__(instance, cls)
        raise AttributeError("This method is available only on the class, not on instances.")
|
||||
|
||||
|
||||
def _update_method_wrapper(_wrapper, decorator):
|
||||
# _multi_decorate()'s bound_method isn't available in this scope. Cheat by
|
||||
# using it on a dummy function.
|
||||
@decorator
|
||||
def dummy(*args, **kwargs):
|
||||
pass
|
||||
update_wrapper(_wrapper, dummy)
|
||||
|
||||
|
||||
def _multi_decorate(decorators, method):
    """
    Decorate `method` with one or more function decorators. `decorators` can be
    a single decorator or an iterable of decorators.

    Return a plain function suitable for assignment as a method; the
    decorators run on a per-call bound method.
    """
    if hasattr(decorators, '__iter__'):
        # Apply a list/tuple of decorators if 'decorators' is one. Decorator
        # functions are applied so that the call order is the same as the
        # order in which they appear in the iterable.
        decorators = decorators[::-1]
    else:
        decorators = [decorators]

    def _wrapper(self, *args, **kwargs):
        # bound_method has the signature that 'decorator' expects i.e. no
        # 'self' argument, but it's a closure over self so it can call
        # 'func'. Also, wrap method.__get__() in a function because new
        # attributes can't be set on bound method objects, only on functions.
        bound_method = partial(method.__get__(self, type(self)))
        for dec in decorators:
            bound_method = dec(bound_method)
        return bound_method(*args, **kwargs)

    # Copy any attributes that a decorator adds to the function it decorates.
    for dec in decorators:
        _update_method_wrapper(_wrapper, dec)
    # Preserve any existing attributes of 'method', including the name.
    update_wrapper(_wrapper, method)
    return _wrapper
|
||||
|
||||
|
||||
def method_decorator(decorator, name=''):
    """
    Convert a function decorator into a method decorator
    """
    # 'obj' can be a class or a function. If 'obj' is a function at the time it
    # is passed to _dec, it will eventually be a method of the class it is
    # defined on. If 'obj' is a class, the 'name' is required to be the name
    # of the method that will be decorated.
    def _dec(obj):
        if not isinstance(obj, type):
            # Plain function/method: decorate it directly.
            return _multi_decorate(decorator, obj)
        if not (name and hasattr(obj, name)):
            raise ValueError(
                "The keyword argument `name` must be the name of a method "
                "of the decorated class: %s. Got '%s' instead." % (obj, name)
            )
        method = getattr(obj, name)
        if not callable(method):
            raise TypeError(
                "Cannot decorate '%s' as it isn't a callable attribute of "
                "%s (%s)." % (name, obj, method)
            )
        # Replace the named method with a decorated wrapper on the class.
        _wrapper = _multi_decorate(decorator, method)
        setattr(obj, name, _wrapper)
        return obj

    # Don't worry about making _dec look similar to a list/tuple as it's rather
    # meaningless.
    if not hasattr(decorator, '__iter__'):
        update_wrapper(_dec, decorator)
    # Change the name to aid debugging.
    obj = decorator if hasattr(decorator, '__name__') else decorator.__class__
    _dec.__name__ = 'method_decorator(%s)' % obj.__name__
    return _dec
|
||||
|
||||
|
||||
def decorator_from_middleware_with_args(middleware_class):
    """
    Like decorator_from_middleware, but return a function
    that accepts the arguments to be passed to the middleware_class.
    Use like::

        cache_page = decorator_from_middleware_with_args(CacheMiddleware)
        # ...

        @cache_page(3600)
        def my_view(request):
            # ...
    """
    # The factory itself is the decorator-with-args; no instantiation yet.
    return make_middleware_decorator(middleware_class)
|
||||
|
||||
|
||||
def decorator_from_middleware(middleware_class):
    """
    Given a middleware class (not an instance), return a view decorator. This
    lets you use middleware functionality on a per-view basis. The middleware
    is created with no params passed.
    """
    # Immediately call the factory with no arguments to get the decorator.
    return make_middleware_decorator(middleware_class)()
|
||||
|
||||
|
||||
def make_middleware_decorator(middleware_class):
    """
    Build a decorator factory from a middleware class.

    ``make_middleware_decorator(MW)(*args, **kwargs)`` instantiates
    ``MW(*args, **kwargs)`` once and returns a view decorator that routes
    each request/response through the instance's ``process_*`` hooks.
    """
    def _make_decorator(*m_args, **m_kwargs):
        mw_instance = middleware_class(*m_args, **m_kwargs)

        def _decorator(view_func):
            @wraps(view_func)
            def _wrapped_view(request, *args, **kwargs):
                # Pre-view hooks may short-circuit by returning a response.
                if hasattr(mw_instance, 'process_request'):
                    early = mw_instance.process_request(request)
                    if early is not None:
                        return early
                if hasattr(mw_instance, 'process_view'):
                    early = mw_instance.process_view(request, view_func, args, kwargs)
                    if early is not None:
                        return early
                try:
                    response = view_func(request, *args, **kwargs)
                except Exception as e:
                    # Let the middleware turn the exception into a response;
                    # otherwise re-raise it unchanged.
                    if hasattr(mw_instance, 'process_exception'):
                        handled = mw_instance.process_exception(request, e)
                        if handled is not None:
                            return handled
                    raise
                if hasattr(response, 'render') and callable(response.render):
                    if hasattr(mw_instance, 'process_template_response'):
                        response = mw_instance.process_template_response(request, response)
                    # Defer running of process_response until after the
                    # template has been rendered:
                    if hasattr(mw_instance, 'process_response'):
                        def callback(response):
                            return mw_instance.process_response(request, response)
                        response.add_post_render_callback(callback)
                else:
                    if hasattr(mw_instance, 'process_response'):
                        return mw_instance.process_response(request, response)
                return response
            return _wrapped_view
        return _decorator
    return _make_decorator
|
||||
|
||||
|
||||
class classproperty:
    """
    Descriptor that exposes the result of calling ``method`` with the owning
    class as a read-only attribute on both the class and its instances.
    """

    def __init__(self, method=None):
        # The getter; may also be attached later via .getter().
        self.fget = method

    def __get__(self, instance, cls=None):
        # Always call with the class, never the instance.
        return self.fget(cls)

    def getter(self, method):
        """Set the getter and return self, so it can be used as a decorator."""
        self.fget = method
        return self
|
||||
97
venv/lib/python3.8/site-packages/django/utils/deprecation.py
Normal file
97
venv/lib/python3.8/site-packages/django/utils/deprecation.py
Normal file
@@ -0,0 +1,97 @@
|
||||
import inspect
|
||||
import warnings
|
||||
|
||||
|
||||
class RemovedInDjango31Warning(DeprecationWarning):
    # Features scheduled for removal in Django 3.1 (visible by default).
    pass
|
||||
|
||||
|
||||
class RemovedInDjango40Warning(PendingDeprecationWarning):
    # Features scheduled for removal in Django 4.0 (hidden by default).
    pass
|
||||
|
||||
|
||||
# Alias for whichever warning applies to the next feature release.
RemovedInNextVersionWarning = RemovedInDjango31Warning
|
||||
|
||||
|
||||
class warn_about_renamed_method:
    """
    Callable decorator: wraps a method so every call emits a deprecation
    warning pointing from the old method name to the new one.
    """

    def __init__(self, class_name, old_method_name, new_method_name, deprecation_warning):
        self.class_name = class_name
        self.old_method_name = old_method_name
        self.new_method_name = new_method_name
        self.deprecation_warning = deprecation_warning

    def __call__(self, f):
        def wrapped(*args, **kwargs):
            message = "`%s.%s` is deprecated, use `%s` instead." % (
                self.class_name, self.old_method_name, self.new_method_name,
            )
            warnings.warn(message, self.deprecation_warning, 2)
            return f(*args, **kwargs)
        return wrapped
|
||||
|
||||
|
||||
class RenameMethodsBase(type):
    """
    Handles the deprecation paths when renaming a method.

    It does the following:
    1) Define the new method if missing and complain about it.
    2) Define the old method if missing.
    3) Complain whenever an old method is called.

    See #15363 for more details.
    """

    # Tuple of (old_name, new_name, deprecation_warning) triples, supplied by
    # metaclass subclasses.
    renamed_methods = ()

    def __new__(cls, name, bases, attrs):
        new_class = super().__new__(cls, name, bases, attrs)

        # Walk the whole MRO so renames defined on ancestors are handled too.
        for base in inspect.getmro(new_class):
            class_name = base.__name__
            for renamed_method in cls.renamed_methods:
                old_method_name = renamed_method[0]
                old_method = base.__dict__.get(old_method_name)
                new_method_name = renamed_method[1]
                new_method = base.__dict__.get(new_method_name)
                deprecation_warning = renamed_method[2]
                wrapper = warn_about_renamed_method(class_name, *renamed_method)

                # Define the new method if missing and complain about it
                if not new_method and old_method:
                    warnings.warn(
                        "`%s.%s` method should be renamed `%s`." %
                        (class_name, old_method_name, new_method_name),
                        deprecation_warning, 2)
                    setattr(base, new_method_name, old_method)
                    setattr(base, old_method_name, wrapper(old_method))

                # Define the old method as a wrapped call to the new method.
                if not old_method and new_method:
                    setattr(base, old_method_name, wrapper(new_method))

        return new_class
|
||||
|
||||
|
||||
class DeprecationInstanceCheck(type):
    """Metaclass that warns whenever isinstance() is checked against the class."""

    def __instancecheck__(self, instance):
        # 'alternative' and 'deprecation_warning' are class attributes
        # supplied by the class that uses this metaclass.
        message = "`%s` is deprecated, use `%s` instead." % (self.__name__, self.alternative)
        warnings.warn(message, self.deprecation_warning, 2)
        return super().__instancecheck__(instance)
|
||||
|
||||
|
||||
class MiddlewareMixin:
    """
    Adapter that lets old-style middleware (process_request /
    process_response hooks) participate in the new-style __call__ protocol.
    """

    def __init__(self, get_response=None):
        # Next middleware or view in the chain; invoked when
        # process_request does not short-circuit.
        self.get_response = get_response
        super().__init__()

    def __call__(self, request):
        result = self.process_request(request) if hasattr(self, 'process_request') else None
        if not result:
            result = self.get_response(request)
        if hasattr(self, 'process_response'):
            result = self.process_response(request, result)
        return result
|
||||
44
venv/lib/python3.8/site-packages/django/utils/duration.py
Normal file
44
venv/lib/python3.8/site-packages/django/utils/duration.py
Normal file
@@ -0,0 +1,44 @@
|
||||
import datetime
|
||||
|
||||
|
||||
def _get_duration_components(duration):
|
||||
days = duration.days
|
||||
seconds = duration.seconds
|
||||
microseconds = duration.microseconds
|
||||
|
||||
minutes = seconds // 60
|
||||
seconds = seconds % 60
|
||||
|
||||
hours = minutes // 60
|
||||
minutes = minutes % 60
|
||||
|
||||
return days, hours, minutes, seconds, microseconds
|
||||
|
||||
|
||||
def duration_string(duration):
    """Version of str(timedelta) which is not English specific."""
    days, hours, minutes, seconds, microseconds = _get_duration_components(duration)

    # Base clock part, then prepend days / append microseconds only if set.
    text = '{:02d}:{:02d}:{:02d}'.format(hours, minutes, seconds)
    if days:
        text = '{} {}'.format(days, text)
    if microseconds:
        text = '{}.{:06d}'.format(text, microseconds)
    return text
|
||||
|
||||
|
||||
def duration_iso_string(duration):
    """Return an ISO 8601 duration string (e.g. 'P1DT02H03M04.000005S')."""
    # Negative durations carry a leading '-' and are formatted from the
    # absolute value.
    sign = '-' if duration < datetime.timedelta(0) else ''
    if sign:
        duration *= -1

    days, hours, minutes, seconds, microseconds = _get_duration_components(duration)
    ms = '.{:06d}'.format(microseconds) if microseconds else ""
    return '{}P{}DT{:02d}H{:02d}M{:02d}{}S'.format(sign, days, hours, minutes, seconds, ms)
|
||||
|
||||
|
||||
def duration_microseconds(delta):
    """Return the total number of microseconds in a timedelta, as an int."""
    total_seconds = delta.days * 24 * 60 * 60 + delta.seconds
    return total_seconds * 1000000 + delta.microseconds
|
||||
273
venv/lib/python3.8/site-packages/django/utils/encoding.py
Normal file
273
venv/lib/python3.8/site-packages/django/utils/encoding.py
Normal file
@@ -0,0 +1,273 @@
|
||||
import codecs
|
||||
import datetime
|
||||
import locale
|
||||
import warnings
|
||||
from decimal import Decimal
|
||||
from urllib.parse import quote
|
||||
|
||||
from django.utils.deprecation import RemovedInDjango40Warning
|
||||
from django.utils.functional import Promise
|
||||
|
||||
|
||||
class DjangoUnicodeDecodeError(UnicodeDecodeError):
    """UnicodeDecodeError that also remembers the object that failed to decode."""

    def __init__(self, obj, *args):
        # Keep the offending object so the message can show it.
        self.obj = obj
        super().__init__(*args)

    def __str__(self):
        return '%s. You passed in %r (%s)' % (super().__str__(), self.obj, type(self.obj))
|
||||
|
||||
|
||||
def smart_str(s, encoding='utf-8', strings_only=False, errors='strict'):
    """
    Return a string representing 's'. Treat bytestrings using the 'encoding'
    codec.

    If strings_only is True, don't convert (some) non-string-like objects.
    """
    # Lazy translation objects (gettext_lazy() results) stay lazy.
    if isinstance(s, Promise):
        return s
    return force_str(s, encoding, strings_only, errors)
|
||||
|
||||
|
||||
# Types preserved as-is by force_str(strings_only=True) instead of being
# converted to text.
_PROTECTED_TYPES = (
    type(None), int, float, Decimal, datetime.datetime, datetime.date, datetime.time,
)


def is_protected_type(obj):
    """Determine if the object instance is of a protected type.

    Objects of protected types are preserved as-is when passed to
    force_str(strings_only=True).
    """
    return isinstance(obj, _PROTECTED_TYPES)
|
||||
|
||||
|
||||
def force_str(s, encoding='utf-8', strings_only=False, errors='strict'):
    """
    Similar to smart_str(), except that lazy instances are resolved to
    strings, rather than kept as lazy objects.

    If strings_only is True, don't convert (some) non-string-like objects.
    """
    # Handle the common case first for performance reasons.
    if issubclass(type(s), str):
        return s
    if strings_only and is_protected_type(s):
        return s
    try:
        s = str(s, encoding, errors) if isinstance(s, bytes) else str(s)
    except UnicodeDecodeError as e:
        # Re-raise with the offending object attached for a clearer message.
        raise DjangoUnicodeDecodeError(s, *e.args)
    return s
|
||||
|
||||
|
||||
def smart_bytes(s, encoding='utf-8', strings_only=False, errors='strict'):
    """
    Return a bytestring version of 's', encoded as specified in 'encoding'.

    If strings_only is True, don't convert (some) non-string-like objects.
    """
    # Lazy translation objects (gettext_lazy() results) stay lazy.
    if isinstance(s, Promise):
        return s
    return force_bytes(s, encoding, strings_only, errors)
|
||||
|
||||
|
||||
def force_bytes(s, encoding='utf-8', strings_only=False, errors='strict'):
    """
    Similar to smart_bytes, except that lazy instances are resolved to
    strings, rather than kept as lazy objects.

    If strings_only is True, don't convert (some) non-string-like objects.
    """
    # Handle the common case first for performance reasons.
    if isinstance(s, bytes):
        # Already bytes: re-encode only if a different codec was requested.
        return s if encoding == 'utf-8' else s.decode('utf-8', errors).encode(encoding, errors)
    if strings_only and is_protected_type(s):
        return s
    if isinstance(s, memoryview):
        return bytes(s)
    return str(s).encode(encoding, errors)
|
||||
|
||||
|
||||
def smart_text(s, encoding='utf-8', strings_only=False, errors='strict'):
    """Deprecated alias of smart_str(); emits RemovedInDjango40Warning."""
    warnings.warn(
        'smart_text() is deprecated in favor of smart_str().',
        RemovedInDjango40Warning, stacklevel=2,
    )
    return smart_str(s, encoding, strings_only, errors)
|
||||
|
||||
|
||||
def force_text(s, encoding='utf-8', strings_only=False, errors='strict'):
    """Deprecated alias of force_str(); emits RemovedInDjango40Warning."""
    warnings.warn(
        'force_text() is deprecated in favor of force_str().',
        RemovedInDjango40Warning, stacklevel=2,
    )
    return force_str(s, encoding, strings_only, errors)
|
||||
|
||||
|
||||
def iri_to_uri(iri):
    """
    Convert an Internationalized Resource Identifier (IRI) portion to a URI
    portion that is suitable for inclusion in a URL.

    This is the algorithm from section 3.1 of RFC 3987, slightly simplified
    since the input is assumed to be a string rather than an arbitrary byte
    stream.

    Take an IRI (string or UTF-8 bytes, e.g. '/I ♥ Django/' or
    b'/I \xe2\x99\xa5 Django/') and return a string containing the encoded
    result with ASCII chars only (e.g. '/I%20%E2%99%A5%20Django/').
    """
    # The list of safe characters here is constructed from the "reserved" and
    # "unreserved" characters specified in sections 2.2 and 2.3 of RFC 3986:
    #     reserved    = gen-delims / sub-delims
    #     gen-delims  = ":" / "/" / "?" / "#" / "[" / "]" / "@"
    #     sub-delims  = "!" / "$" / "&" / "'" / "(" / ")"
    #                   / "*" / "+" / "," / ";" / "="
    #     unreserved  = ALPHA / DIGIT / "-" / "." / "_" / "~"
    # Of the unreserved characters, urllib.parse.quote() already considers all
    # but the ~ safe.
    # The % character is also added to the list of safe characters here, as the
    # end of section 3.1 of RFC 3987 specifically mentions that % must not be
    # converted.
    if iri is None:
        return iri
    elif isinstance(iri, Promise):
        # Resolve lazy translation objects before quoting.
        iri = str(iri)
    return quote(iri, safe="/#%[]=:;$&()+,!?*@'~")
|
||||
|
||||
|
||||
# List of byte values that uri_to_iri() decodes from percent encoding.
# First, the unreserved characters from RFC 3986:
# 45/46/95/126 are '-', '.', '_', '~'; the ranges cover A-Z and a-z.
_ascii_ranges = [[45, 46, 95, 126], range(65, 91), range(97, 123)]
# Map both lowercase and uppercase hex spellings (b'%2d' and b'%2D') of each
# unreserved character to its single decoded byte.
_hextobyte = {
    (fmt % char).encode(): bytes((char,))
    for ascii_range in _ascii_ranges
    for char in ascii_range
    for fmt in ['%02x', '%02X']
}
# And then everything above 128, because bytes ≥ 128 are part of multibyte
# unicode characters.
_hexdig = '0123456789ABCDEFabcdef'
_hextobyte.update({
    (a + b).encode(): bytes.fromhex(a + b)
    for a in _hexdig[8:] for b in _hexdig
})
|
||||
|
||||
|
||||
def uri_to_iri(uri):
    """
    Convert a Uniform Resource Identifier(URI) into an Internationalized
    Resource Identifier(IRI).

    This is the algorithm from section 3.2 of RFC 3987, excluding step 4.

    Take an URI in ASCII bytes (e.g. '/I%20%E2%99%A5%20Django/') and return
    a string containing the encoded result (e.g. '/I%20♥%20Django/').
    """
    if uri is None:
        return uri
    uri = force_bytes(uri)
    # Fast selective unquote: First, split on '%' and then starting with the
    # second block, decode the first 2 bytes if they represent a hex code to
    # decode. The rest of the block is the part after '%AB', not containing
    # any '%'. Add that to the output without further processing.
    bits = uri.split(b'%')
    if len(bits) == 1:
        # No percent-escapes at all; nothing to decode.
        iri = uri
    else:
        parts = [bits[0]]
        append = parts.append
        hextobyte = _hextobyte
        for item in bits[1:]:
            hex = item[:2]
            if hex in hextobyte:
                # Known escape: emit decoded byte + the literal remainder.
                append(hextobyte[item[:2]])
                append(item[2:])
            else:
                # Not a decodable escape: keep the '%' and the block verbatim.
                append(b'%')
                append(item)
        iri = b''.join(parts)
    return repercent_broken_unicode(iri).decode()
|
||||
|
||||
|
||||
def escape_uri_path(path):
    """
    Escape the unsafe characters from the path portion of a Uniform Resource
    Identifier (URI).
    """
    # Safe set built from the "reserved" and "unreserved" characters of RFC
    # 2396 sections 2.2/2.3, minus ";", "=", and "?" per section 3.3. "/" is
    # kept unescaped because the entire path (not one segment) is escaped.
    return quote(path, safe="/:@&+$,-_.!~*'()")
|
||||
|
||||
|
||||
def punycode(domain):
    """Return the Punycode of the given domain if it's non-ASCII."""
    # The 'idna' codec encodes label by label; ASCII domains pass through.
    encoded = domain.encode('idna')
    return encoded.decode('ascii')
|
||||
|
||||
|
||||
def repercent_broken_unicode(path):
    """
    As per section 3.2 of RFC 3987, step three of converting a URI into an IRI,
    repercent-encode any octet produced that is not part of a strictly legal
    UTF-8 octet sequence.
    """
    # Iterative, not recursive: recursion through the exception handler uses
    # massive amounts of memory (CVE-2019-14235).
    while True:
        try:
            path.decode()
        except UnicodeDecodeError as exc:
            bad = path[exc.start:exc.end]
            path = path[:exc.start] + quote(bad, safe=b"/#%[]=:;$&()+,!?*@'~").encode() + path[exc.end:]
        else:
            return path
|
||||
|
||||
|
||||
def filepath_to_uri(path):
    """Convert a file system path to a URI portion that is suitable for
    inclusion in a URL.

    Encode certain chars that would normally be recognized as special chars
    for URIs. Do not encode the ' character, as it is a valid character
    within URIs. See the encodeURIComponent() JavaScript function for details.
    """
    if path is None:
        return path
    # Backslashes are normalized to forward slashes deliberately, rather than
    # relying on os.sep/os.altsep, to allow hardcoded separators.
    normalized = path.replace("\\", "/")
    return quote(normalized, safe="/~!*()'")
|
||||
|
||||
|
||||
def get_system_encoding():
    """
    The encoding of the default system locale. Fallback to 'ascii' if the
    encoding is unsupported by Python or could not be determined. See tickets
    #10335 and #5846.
    """
    try:
        encoding = locale.getdefaultlocale()[1] or 'ascii'
        # Verify Python actually knows this codec; raises LookupError if not.
        codecs.lookup(encoding)
    except Exception:
        encoding = 'ascii'
    return encoding


# Computed once at import time; the locale is not expected to change.
DEFAULT_LOCALE_ENCODING = get_system_encoding()
|
||||
392
venv/lib/python3.8/site-packages/django/utils/feedgenerator.py
Normal file
392
venv/lib/python3.8/site-packages/django/utils/feedgenerator.py
Normal file
@@ -0,0 +1,392 @@
|
||||
"""
|
||||
Syndication feed generation library -- used for generating RSS, etc.
|
||||
|
||||
Sample usage:
|
||||
|
||||
>>> from django.utils import feedgenerator
|
||||
>>> feed = feedgenerator.Rss201rev2Feed(
|
||||
... title="Poynter E-Media Tidbits",
|
||||
... link="http://www.poynter.org/column.asp?id=31",
|
||||
... description="A group Weblog by the sharpest minds in online media/journalism/publishing.",
|
||||
... language="en",
|
||||
... )
|
||||
>>> feed.add_item(
|
||||
... title="Hello",
|
||||
... link="http://www.holovaty.com/test/",
|
||||
... description="Testing."
|
||||
... )
|
||||
>>> with open('test.rss', 'w') as fp:
|
||||
... feed.write(fp, 'utf-8')
|
||||
|
||||
For definitions of the different versions of RSS, see:
|
||||
https://web.archive.org/web/20110718035220/http://diveintomark.org/archives/2004/02/04/incompatible-rss
|
||||
"""
|
||||
import datetime
|
||||
import email
|
||||
from io import StringIO
|
||||
from urllib.parse import urlparse
|
||||
|
||||
from django.utils.encoding import iri_to_uri
|
||||
from django.utils.timezone import utc
|
||||
from django.utils.xmlutils import SimplerXMLGenerator
|
||||
|
||||
|
||||
def rfc2822_date(date):
    """Format a date or datetime per RFC 2822 (used for RSS pubDate fields)."""
    # Bare dates are promoted to midnight datetimes before formatting.
    if not isinstance(date, datetime.datetime):
        date = datetime.datetime.combine(date, datetime.time())
    return email.utils.format_datetime(date)
|
||||
|
||||
|
||||
def rfc3339_date(date):
    """Format a date or datetime per RFC 3339 (used for Atom timestamps)."""
    # Bare dates are promoted to midnight datetimes before formatting.
    if not isinstance(date, datetime.datetime):
        date = datetime.datetime.combine(date, datetime.time())
    # Naive datetimes get a literal 'Z'; aware ones already carry an offset.
    suffix = 'Z' if date.utcoffset() is None else ''
    return date.isoformat() + suffix
|
||||
|
||||
|
||||
def get_tag_uri(url, date):
    """
    Create a TagURI.

    See https://web.archive.org/web/20110514113830/http://diveintomark.org/archives/2004/05/28/howto-atom-id
    """
    bits = urlparse(url)
    # The date segment (',YYYY-MM-DD') is only present when a date is given.
    d = ',%s' % date.strftime('%Y-%m-%d') if date is not None else ''
    return 'tag:%s%s:%s/%s' % (bits.hostname, d, bits.path, bits.fragment)
|
||||
|
||||
|
||||
class SyndicationFeed:
    "Base class for all syndication feeds. Subclasses should provide write()"
    def __init__(self, title, link, description, language=None, author_email=None,
                 author_name=None, author_link=None, subtitle=None, categories=None,
                 feed_url=None, feed_copyright=None, feed_guid=None, ttl=None, **kwargs):
        def to_str(s):
            # Stringify non-None values; leave None untouched.
            return str(s) if s is not None else s
        categories = categories and [str(c) for c in categories]
        # Normalized feed-level metadata; extra kwargs are merged in so
        # subclasses can carry custom keys.
        self.feed = {
            'title': to_str(title),
            'link': iri_to_uri(link),
            'description': to_str(description),
            'language': to_str(language),
            'author_email': to_str(author_email),
            'author_name': to_str(author_name),
            'author_link': iri_to_uri(author_link),
            'subtitle': to_str(subtitle),
            'categories': categories or (),
            'feed_url': iri_to_uri(feed_url),
            'feed_copyright': to_str(feed_copyright),
            'id': feed_guid or link,
            'ttl': to_str(ttl),
            **kwargs,
        }
        self.items = []

    def add_item(self, title, link, description, author_email=None,
                 author_name=None, author_link=None, pubdate=None, comments=None,
                 unique_id=None, unique_id_is_permalink=None, categories=(),
                 item_copyright=None, ttl=None, updateddate=None, enclosures=None, **kwargs):
        """
        Add an item to the feed. All args are expected to be strings except
        pubdate and updateddate, which are datetime.datetime objects, and
        enclosures, which is an iterable of instances of the Enclosure class.
        """
        def to_str(s):
            return str(s) if s is not None else s
        categories = categories and [to_str(c) for c in categories]
        self.items.append({
            'title': to_str(title),
            'link': iri_to_uri(link),
            'description': to_str(description),
            'author_email': to_str(author_email),
            'author_name': to_str(author_name),
            'author_link': iri_to_uri(author_link),
            'pubdate': pubdate,
            'updateddate': updateddate,
            'comments': to_str(comments),
            'unique_id': to_str(unique_id),
            'unique_id_is_permalink': unique_id_is_permalink,
            'enclosures': enclosures or (),
            'categories': categories or (),
            'item_copyright': to_str(item_copyright),
            'ttl': to_str(ttl),
            **kwargs,
        })

    def num_items(self):
        # Number of items added so far.
        return len(self.items)

    def root_attributes(self):
        """
        Return extra attributes to place on the root (i.e. feed/channel) element.
        Called from write().
        """
        return {}

    def add_root_elements(self, handler):
        """
        Add elements in the root (i.e. feed/channel) element. Called
        from write().
        """
        pass

    def item_attributes(self, item):
        """
        Return extra attributes to place on each item (i.e. item/entry) element.
        """
        return {}

    def add_item_elements(self, handler, item):
        """
        Add elements on each item (i.e. item/entry) element.
        """
        pass

    def write(self, outfile, encoding):
        """
        Output the feed in the given encoding to outfile, which is a file-like
        object. Subclasses should override this.
        """
        raise NotImplementedError('subclasses of SyndicationFeed must provide a write() method')

    def writeString(self, encoding):
        """
        Return the feed in the given encoding as a string.
        """
        s = StringIO()
        self.write(s, encoding)
        return s.getvalue()

    def latest_post_date(self):
        """
        Return the latest item's pubdate or updateddate. If no items
        have either of these attributes this return the current UTC date/time.
        """
        latest_date = None
        # updateddate takes priority over pubdate for each item.
        date_keys = ('updateddate', 'pubdate')

        for item in self.items:
            for date_key in date_keys:
                item_date = item.get(date_key)
                if item_date:
                    if latest_date is None or item_date > latest_date:
                        latest_date = item_date

        # datetime.now(tz=utc) is slower, as documented in django.utils.timezone.now
        return latest_date or datetime.datetime.utcnow().replace(tzinfo=utc)
|
||||
|
||||
|
||||
class Enclosure:
    """A single RSS enclosure (media attachment) for a feed item."""

    def __init__(self, url, length, mime_type):
        """All args are expected to be strings."""
        self.length = length
        self.mime_type = mime_type
        # The URL is IRI-encoded so it is safe to embed in XML output.
        self.url = iri_to_uri(url)
|
||||
|
||||
|
||||
class RssFeed(SyndicationFeed):
    """Common RSS machinery; subclasses set _version and item elements."""
    content_type = 'application/rss+xml; charset=utf-8'

    def write(self, outfile, encoding):
        # Serialize the whole feed: <rss><channel>...items...</channel></rss>.
        handler = SimplerXMLGenerator(outfile, encoding)
        handler.startDocument()
        handler.startElement("rss", self.rss_attributes())
        handler.startElement("channel", self.root_attributes())
        self.add_root_elements(handler)
        self.write_items(handler)
        self.endChannelElement(handler)
        handler.endElement("rss")

    def rss_attributes(self):
        # Attributes for the root <rss> element.
        return {
            'version': self._version,
            'xmlns:atom': 'http://www.w3.org/2005/Atom',
        }

    def write_items(self, handler):
        # Emit one <item> element per feed item.
        for item in self.items:
            handler.startElement('item', self.item_attributes(item))
            self.add_item_elements(handler, item)
            handler.endElement("item")

    def add_root_elements(self, handler):
        # Channel-level metadata; optional fields are emitted only when set.
        handler.addQuickElement("title", self.feed['title'])
        handler.addQuickElement("link", self.feed['link'])
        handler.addQuickElement("description", self.feed['description'])
        if self.feed['feed_url'] is not None:
            handler.addQuickElement("atom:link", None, {"rel": "self", "href": self.feed['feed_url']})
        if self.feed['language'] is not None:
            handler.addQuickElement("language", self.feed['language'])
        for cat in self.feed['categories']:
            handler.addQuickElement("category", cat)
        if self.feed['feed_copyright'] is not None:
            handler.addQuickElement("copyright", self.feed['feed_copyright'])
        handler.addQuickElement("lastBuildDate", rfc2822_date(self.latest_post_date()))
        if self.feed['ttl'] is not None:
            handler.addQuickElement("ttl", self.feed['ttl'])

    def endChannelElement(self, handler):
        handler.endElement("channel")
|
||||
|
||||
|
||||
class RssUserland091Feed(RssFeed):
    """RSS 0.91 feed: items carry only title, link, and (optionally) description."""

    _version = "0.91"

    def add_item_elements(self, handler, item):
        add = handler.addQuickElement
        add("title", item['title'])
        add("link", item['link'])
        if item['description'] is not None:
            add("description", item['description'])
|
||||
|
||||
|
||||
class Rss201rev2Feed(RssFeed):
    """RSS 2.01 rev 2 feed with full per-item metadata."""
    # Spec: https://cyber.harvard.edu/rss/rss.html
    _version = "2.0"

    def add_item_elements(self, handler, item):
        handler.addQuickElement("title", item['title'])
        handler.addQuickElement("link", item['link'])
        if item['description'] is not None:
            handler.addQuickElement("description", item['description'])

        # Author information.
        if item["author_name"] and item["author_email"]:
            handler.addQuickElement("author", "%s (%s)" % (item['author_email'], item['author_name']))
        elif item["author_email"]:
            handler.addQuickElement("author", item["author_email"])
        elif item["author_name"]:
            # Name only: RSS <author> requires an email, so fall back to
            # Dublin Core's dc:creator element.
            handler.addQuickElement(
                "dc:creator", item["author_name"], {"xmlns:dc": "http://purl.org/dc/elements/1.1/"}
            )

        if item['pubdate'] is not None:
            handler.addQuickElement("pubDate", rfc2822_date(item['pubdate']))
        if item['comments'] is not None:
            handler.addQuickElement("comments", item['comments'])
        if item['unique_id'] is not None:
            guid_attrs = {}
            # isPermaLink is only emitted when explicitly set to a bool.
            if isinstance(item.get('unique_id_is_permalink'), bool):
                guid_attrs['isPermaLink'] = str(item['unique_id_is_permalink']).lower()
            handler.addQuickElement("guid", item['unique_id'], guid_attrs)
        if item['ttl'] is not None:
            handler.addQuickElement("ttl", item['ttl'])

        # Enclosure.
        if item['enclosures']:
            enclosures = list(item['enclosures'])
            if len(enclosures) > 1:
                raise ValueError(
                    "RSS feed items may only have one enclosure, see "
                    "http://www.rssboard.org/rss-profile#element-channel-item-enclosure"
                )
            enclosure = enclosures[0]
            handler.addQuickElement('enclosure', '', {
                'url': enclosure.url,
                'length': enclosure.length,
                'type': enclosure.mime_type,
            })

        # Categories.
        for cat in item['categories']:
            handler.addQuickElement("category", cat)
|
||||
|
||||
|
||||
class Atom1Feed(SyndicationFeed):
    """Atom 1.0 feed generator."""
    # Spec: https://tools.ietf.org/html/rfc4287
    content_type = 'application/atom+xml; charset=utf-8'
    ns = "http://www.w3.org/2005/Atom"

    def write(self, outfile, encoding):
        # Serialize the whole feed: <feed>...entries...</feed>.
        handler = SimplerXMLGenerator(outfile, encoding)
        handler.startDocument()
        handler.startElement('feed', self.root_attributes())
        self.add_root_elements(handler)
        self.write_items(handler)
        handler.endElement("feed")

    def root_attributes(self):
        # xml:lang is only declared when the feed has a language.
        if self.feed['language'] is not None:
            return {"xmlns": self.ns, "xml:lang": self.feed['language']}
        else:
            return {"xmlns": self.ns}

    def add_root_elements(self, handler):
        # Feed-level metadata; optional fields emitted only when set.
        handler.addQuickElement("title", self.feed['title'])
        handler.addQuickElement("link", "", {"rel": "alternate", "href": self.feed['link']})
        if self.feed['feed_url'] is not None:
            handler.addQuickElement("link", "", {"rel": "self", "href": self.feed['feed_url']})
        handler.addQuickElement("id", self.feed['id'])
        handler.addQuickElement("updated", rfc3339_date(self.latest_post_date()))
        if self.feed['author_name'] is not None:
            handler.startElement("author", {})
            handler.addQuickElement("name", self.feed['author_name'])
            if self.feed['author_email'] is not None:
                handler.addQuickElement("email", self.feed['author_email'])
            if self.feed['author_link'] is not None:
                handler.addQuickElement("uri", self.feed['author_link'])
            handler.endElement("author")
        if self.feed['subtitle'] is not None:
            handler.addQuickElement("subtitle", self.feed['subtitle'])
        for cat in self.feed['categories']:
            handler.addQuickElement("category", "", {"term": cat})
        if self.feed['feed_copyright'] is not None:
            handler.addQuickElement("rights", self.feed['feed_copyright'])

    def write_items(self, handler):
        # Emit one <entry> element per feed item.
        for item in self.items:
            handler.startElement("entry", self.item_attributes(item))
            self.add_item_elements(handler, item)
            handler.endElement("entry")

    def add_item_elements(self, handler, item):
        handler.addQuickElement("title", item['title'])
        handler.addQuickElement("link", "", {"href": item['link'], "rel": "alternate"})

        if item['pubdate'] is not None:
            handler.addQuickElement('published', rfc3339_date(item['pubdate']))

        if item['updateddate'] is not None:
            handler.addQuickElement('updated', rfc3339_date(item['updateddate']))

        # Author information.
        if item['author_name'] is not None:
            handler.startElement("author", {})
            handler.addQuickElement("name", item['author_name'])
            if item['author_email'] is not None:
                handler.addQuickElement("email", item['author_email'])
            if item['author_link'] is not None:
                handler.addQuickElement("uri", item['author_link'])
            handler.endElement("author")

        # Unique ID.
        if item['unique_id'] is not None:
            unique_id = item['unique_id']
        else:
            # Fall back to a generated tag: URI based on the link and pubdate.
            unique_id = get_tag_uri(item['link'], item['pubdate'])
        handler.addQuickElement("id", unique_id)

        # Summary.
        if item['description'] is not None:
            handler.addQuickElement("summary", item['description'], {"type": "html"})

        # Enclosures.
        for enclosure in item['enclosures']:
            handler.addQuickElement('link', '', {
                'rel': 'enclosure',
                'href': enclosure.url,
                'length': enclosure.length,
                'type': enclosure.mime_type,
            })

        # Categories.
        for cat in item['categories']:
            handler.addQuickElement("category", "", {"term": cat})

        # Rights.
        if item['item_copyright'] is not None:
            handler.addQuickElement("rights", item['item_copyright'])
|
||||
|
||||
|
||||
# This isolates the decision of what the system default is, so calling code can
# do "feedgenerator.DefaultFeed" instead of "feedgenerator.Rss201rev2Feed".
DefaultFeed = Rss201rev2Feed
|
||||
257
venv/lib/python3.8/site-packages/django/utils/formats.py
Normal file
257
venv/lib/python3.8/site-packages/django/utils/formats.py
Normal file
@@ -0,0 +1,257 @@
|
||||
import datetime
|
||||
import decimal
|
||||
import unicodedata
|
||||
from importlib import import_module
|
||||
|
||||
from django.conf import settings
|
||||
from django.utils import dateformat, datetime_safe, numberformat
|
||||
from django.utils.functional import lazy
|
||||
from django.utils.translation import (
|
||||
check_for_language, get_language, to_locale,
|
||||
)
|
||||
|
||||
# format_cache is a mapping from (format_type, lang) to the format string.
# By using the cache, it is possible to avoid running get_format_modules
# repeatedly.
_format_cache = {}
# Mapping of lang -> list of format modules, filled lazily by
# get_format_modules().
_format_modules_cache = {}

# ISO 8601-style input formats that get_format() merges into any
# locale-provided *_INPUT_FORMATS list so ISO input is always accepted.
ISO_INPUT_FORMATS = {
    'DATE_INPUT_FORMATS': ['%Y-%m-%d'],
    'TIME_INPUT_FORMATS': ['%H:%M:%S', '%H:%M:%S.%f', '%H:%M'],
    'DATETIME_INPUT_FORMATS': [
        '%Y-%m-%d %H:%M:%S',
        '%Y-%m-%d %H:%M:%S.%f',
        '%Y-%m-%d %H:%M',
        '%Y-%m-%d'
    ],
}


# The only names get_format() will look up on locale modules or on settings;
# any other format_type is returned to the caller unchanged.
FORMAT_SETTINGS = frozenset([
    'DECIMAL_SEPARATOR',
    'THOUSAND_SEPARATOR',
    'NUMBER_GROUPING',
    'FIRST_DAY_OF_WEEK',
    'MONTH_DAY_FORMAT',
    'TIME_FORMAT',
    'DATE_FORMAT',
    'DATETIME_FORMAT',
    'SHORT_DATE_FORMAT',
    'SHORT_DATETIME_FORMAT',
    'YEAR_MONTH_FORMAT',
    'DATE_INPUT_FORMATS',
    'TIME_INPUT_FORMATS',
    'DATETIME_INPUT_FORMATS',
])
|
||||
|
||||
|
||||
def reset_format_cache():
    """Drop all cached format data.

    Intended primarily for tests that need to undo the side effects of
    previously cached formats.
    """
    global _format_cache
    global _format_modules_cache
    # Rebind fresh dicts rather than mutating, matching the module's
    # lazy-refill strategy.
    _format_cache = {}
    _format_modules_cache = {}
|
||||
|
||||
|
||||
def iter_format_modules(lang, format_module_path=None):
    """Yield the format modules that apply to ``lang``, custom paths first."""
    if not check_for_language(lang):
        return

    if format_module_path is None:
        format_module_path = settings.FORMAT_MODULE_PATH

    locations = []
    if format_module_path:
        paths = (
            [format_module_path]
            if isinstance(format_module_path, str)
            else format_module_path
        )
        locations.extend('%s.%%s' % p for p in paths)
    # Django's bundled locale formats are always the last fallback.
    locations.append('django.conf.locale.%s')

    locale = to_locale(lang)
    candidates = [locale]
    if '_' in locale:
        # Also try the bare language code, e.g. 'pt' for 'pt_BR'.
        candidates.append(locale.partition('_')[0])

    for location in locations:
        for candidate in candidates:
            try:
                yield import_module('%s.formats' % (location % candidate))
            except ImportError:
                continue
|
||||
|
||||
|
||||
def get_format_modules(lang=None, reverse=False):
    """Return the (cached) list of format modules for ``lang``."""
    if lang is None:
        lang = get_language()
    try:
        modules = _format_modules_cache[lang]
    except KeyError:
        modules = _format_modules_cache[lang] = list(
            iter_format_modules(lang, settings.FORMAT_MODULE_PATH)
        )
    return list(reversed(modules)) if reverse else modules
|
||||
|
||||
|
||||
def get_format(format_type, lang=None, use_l10n=None):
    """
    Return the format named ``format_type`` (e.g. 'DATE_FORMAT') for the
    current language (locale), defaulting to the value in settings.

    If use_l10n is provided and is not None, it forces localization on or
    off, overriding settings.USE_L10N.
    """
    if use_l10n is None:
        use_l10n = settings.USE_L10N
    if use_l10n and lang is None:
        lang = get_language()
    cache_key = (format_type, lang)
    if cache_key in _format_cache:
        return _format_cache[cache_key]

    # Not cached yet. Search the locale format modules when l10n applies;
    # otherwise (or when nothing is found) fall back to project settings.
    fmt = None
    if use_l10n:
        for module in get_format_modules(lang):
            fmt = getattr(module, format_type, None)
            if fmt is not None:
                break
    if fmt is None:
        if format_type not in FORMAT_SETTINGS:
            # Unknown format names pass through unchanged (and uncached).
            return format_type
        fmt = getattr(settings, format_type)
    elif format_type in ISO_INPUT_FORMATS:
        # A locale module supplied a list of input formats; make sure the
        # ISO variants are always accepted as well.
        fmt = list(fmt)
        for iso_input in ISO_INPUT_FORMATS[format_type]:
            if iso_input not in fmt:
                fmt.append(iso_input)
    _format_cache[cache_key] = fmt
    return fmt
|
||||
|
||||
|
||||
# Lazy variant of get_format() for use at import time (evaluated on access).
get_format_lazy = lazy(get_format, str, list, tuple)
|
||||
|
||||
|
||||
def date_format(value, format=None, use_l10n=None):
    """
    Format a datetime.date or datetime.datetime object using a
    localizable format.

    If use_l10n is provided and is not None, that will force the value to
    be localized (or not), overriding the value of settings.USE_L10N.
    """
    fmt = get_format(format or 'DATE_FORMAT', use_l10n=use_l10n)
    return dateformat.format(value, fmt)
|
||||
|
||||
|
||||
def time_format(value, format=None, use_l10n=None):
    """
    Format a datetime.time object using a localizable format.

    If use_l10n is provided and is not None, it forces the value to
    be localized (or not), overriding the value of settings.USE_L10N.
    """
    fmt = get_format(format or 'TIME_FORMAT', use_l10n=use_l10n)
    return dateformat.time_format(value, fmt)
|
||||
|
||||
|
||||
def number_format(value, decimal_pos=None, use_l10n=None, force_grouping=False):
    """
    Format a numeric value using localization settings.

    If use_l10n is provided and is not None, it forces the value to
    be localized (or not), overriding the value of settings.USE_L10N.
    """
    localized = settings.USE_L10N if use_l10n is None else use_l10n
    # Only resolve the active language when localization applies.
    lang = get_language() if localized else None
    return numberformat.format(
        value,
        get_format('DECIMAL_SEPARATOR', lang, use_l10n=use_l10n),
        decimal_pos,
        get_format('NUMBER_GROUPING', lang, use_l10n=use_l10n),
        get_format('THOUSAND_SEPARATOR', lang, use_l10n=use_l10n),
        force_grouping=force_grouping,
        use_l10n=use_l10n,
    )
|
||||
|
||||
|
||||
def localize(value, use_l10n=None):
    """
    Check if value is a localizable type (date, number...) and return it
    formatted as a string using current locale format.

    If use_l10n is provided and is not None, it forces the value to
    be localized (or not), overriding the value of settings.USE_L10N.
    """
    if isinstance(value, str):
        # Strings pass through untouched (checked first for performance).
        return value
    if isinstance(value, bool):
        # bool is a subclass of int; keep it out of the numeric branch.
        return str(value)
    if isinstance(value, (decimal.Decimal, float, int)):
        return number_format(value, use_l10n=use_l10n)
    # datetime must be tested before date (datetime is a date subclass).
    if isinstance(value, datetime.datetime):
        return date_format(value, 'DATETIME_FORMAT', use_l10n=use_l10n)
    if isinstance(value, datetime.date):
        return date_format(value, use_l10n=use_l10n)
    if isinstance(value, datetime.time):
        return time_format(value, 'TIME_FORMAT', use_l10n=use_l10n)
    return value
|
||||
|
||||
|
||||
def localize_input(value, default=None):
    """
    Check if an input value is a localizable type and return it
    formatted with the appropriate formatting string of the current locale.
    """
    if isinstance(value, str):
        # Strings pass through untouched (checked first for performance).
        return value
    if isinstance(value, bool):
        # bool is a subclass of int; keep it out of the numeric branch.
        return str(value)
    if isinstance(value, (decimal.Decimal, float, int)):
        return number_format(value)
    # datetime must be tested before date (datetime is a date subclass).
    if isinstance(value, datetime.datetime):
        fmt = default or get_format('DATETIME_INPUT_FORMATS')[0]
        return datetime_safe.new_datetime(value).strftime(fmt)
    if isinstance(value, datetime.date):
        fmt = default or get_format('DATE_INPUT_FORMATS')[0]
        return datetime_safe.new_date(value).strftime(fmt)
    if isinstance(value, datetime.time):
        fmt = default or get_format('TIME_INPUT_FORMATS')[0]
        return value.strftime(fmt)
    return value
|
||||
|
||||
|
||||
def sanitize_separators(value):
    """
    Sanitize a value according to the current decimal and
    thousand separator setting. Used with form field input.
    """
    if isinstance(value, str):
        parts = []
        decimal_separator = get_format('DECIMAL_SEPARATOR')
        if decimal_separator in value:
            # Split off the decimal part; it is re-attached with '.' below.
            value, decimals = value.split(decimal_separator, 1)
            parts.append(decimals)
        if settings.USE_THOUSAND_SEPARATOR:
            thousand_sep = get_format('THOUSAND_SEPARATOR')
            if thousand_sep == '.' and value.count('.') == 1 and len(value.split('.')[-1]) != 3:
                # Special case where we suspect a dot meant decimal separator (see #22171)
                pass
            else:
                # Strip the thousand separator in both its raw and
                # NFKD-normalized form (e.g. narrow no-break space).
                for replacement in {
                        thousand_sep, unicodedata.normalize('NFKD', thousand_sep)}:
                    value = value.replace(replacement, '')
        parts.append(value)
        # parts is [decimals, integer-part]; reversed join yields "int.dec".
        value = '.'.join(reversed(parts))
    return value
|
||||
401
venv/lib/python3.8/site-packages/django/utils/functional.py
Normal file
401
venv/lib/python3.8/site-packages/django/utils/functional.py
Normal file
@@ -0,0 +1,401 @@
|
||||
import copy
|
||||
import itertools
|
||||
import operator
|
||||
from functools import total_ordering, wraps
|
||||
|
||||
|
||||
class cached_property:
    """
    Turn a method taking only ``self`` into a property whose value is
    computed once and then stored in the instance's ``__dict__``.

    A cached property can be made out of an existing method
    (e.g. ``url = cached_property(get_absolute_url)``).
    The optional ``name`` argument is obsolete as of Python 3.6 and will be
    deprecated in Django 4.0 (#30127).
    """
    name = None

    @staticmethod
    def func(instance):
        # Placeholder that stays in place until __set_name__() installs the
        # real function; using the descriptor before that is an error.
        raise TypeError(
            'Cannot use cached_property instance without calling '
            '__set_name__() on it.'
        )

    def __init__(self, func, name=None):
        self.real_func = func
        self.__doc__ = getattr(func, '__doc__')

    def __set_name__(self, owner, name):
        if self.name is None:
            # First assignment: remember the attribute name and activate
            # the wrapped function.
            self.name = name
            self.func = self.real_func
        elif name != self.name:
            raise TypeError(
                "Cannot assign the same cached_property to two different names "
                "(%r and %r)." % (self.name, name)
            )

    def __get__(self, instance, cls=None):
        """
        Compute the value, store it in instance.__dict__ under our name, and
        return it. The instance attribute then shadows this descriptor, so
        later accesses never reach __get__() again.
        """
        if instance is None:
            return self
        value = instance.__dict__[self.name] = self.func(instance)
        return value
|
||||
|
||||
|
||||
class Promise:
    """
    Marker base class for the proxy class created in the closure of the
    lazy function, so other code can recognize promises via isinstance().
    """
|
||||
|
||||
|
||||
def lazy(func, *resultclasses):
    """
    Turn any callable into a lazy evaluated callable. result classes or types
    is required -- at least one is needed so that the automatic forcing of
    the lazy evaluation code is triggered. Results are not memoized; the
    function is evaluated on every access.
    """

    @total_ordering
    class __proxy__(Promise):
        """
        Encapsulate a function call and act as a proxy for methods that are
        called on the result of that function. The function is not evaluated
        until one of the methods on the result is called.
        """
        # Class-level flag: the result-class methods are copied onto the
        # proxy class only once, on first instantiation.
        __prepared = False

        def __init__(self, args, kw):
            self.__args = args
            self.__kw = kw
            if not self.__prepared:
                self.__prepare_class__()
            self.__class__.__prepared = True

        def __reduce__(self):
            # Pickle as a call to _lazy_proxy_unpickle(), which rebuilds an
            # equivalent proxy from func, args, kwargs, and resultclasses.
            return (
                _lazy_proxy_unpickle,
                (func, self.__args, self.__kw) + resultclasses
            )

        def __repr__(self):
            return repr(self.__cast())

        @classmethod
        def __prepare_class__(cls):
            # Copy every method of every result class (and its bases) onto
            # this proxy class, so the proxy quacks like the result.
            for resultclass in resultclasses:
                for type_ in resultclass.mro():
                    for method_name in type_.__dict__:
                        # All __promise__ return the same wrapper method, they
                        # look up the correct implementation when called.
                        if hasattr(cls, method_name):
                            continue
                        meth = cls.__promise__(method_name)
                        setattr(cls, method_name, meth)
            cls._delegate_bytes = bytes in resultclasses
            cls._delegate_text = str in resultclasses
            assert not (cls._delegate_bytes and cls._delegate_text), (
                "Cannot call lazy() with both bytes and text return types.")
            if cls._delegate_text:
                cls.__str__ = cls.__text_cast
            elif cls._delegate_bytes:
                cls.__bytes__ = cls.__bytes_cast

        @classmethod
        def __promise__(cls, method_name):
            # Builds a wrapper around some magic method
            def __wrapper__(self, *args, **kw):
                # Automatically triggers the evaluation of a lazy value and
                # applies the given magic method of the result type.
                res = func(*self.__args, **self.__kw)
                return getattr(res, method_name)(*args, **kw)
            return __wrapper__

        def __text_cast(self):
            return func(*self.__args, **self.__kw)

        def __bytes_cast(self):
            return bytes(func(*self.__args, **self.__kw))

        def __bytes_cast_encoded(self):
            return func(*self.__args, **self.__kw).encode()

        def __cast(self):
            # Evaluate func and coerce per the declared result type(s).
            if self._delegate_bytes:
                return self.__bytes_cast()
            elif self._delegate_text:
                return self.__text_cast()
            else:
                return func(*self.__args, **self.__kw)

        def __str__(self):
            # object defines __str__(), so __prepare_class__() won't overload
            # a __str__() method from the proxied class.
            return str(self.__cast())

        def __eq__(self, other):
            if isinstance(other, Promise):
                other = other.__cast()
            return self.__cast() == other

        def __lt__(self, other):
            if isinstance(other, Promise):
                other = other.__cast()
            return self.__cast() < other

        def __hash__(self):
            return hash(self.__cast())

        def __mod__(self, rhs):
            if self._delegate_text:
                return str(self) % rhs
            return self.__cast() % rhs

        def __deepcopy__(self, memo):
            # Instances of this class are effectively immutable. It's just a
            # collection of functions. So we don't need to do anything
            # complicated for copying.
            memo[id(self)] = self
            return self

    @wraps(func)
    def __wrapper__(*args, **kw):
        # Creates the proxy object, instead of the actual value.
        return __proxy__(args, kw)

    return __wrapper__
|
||||
|
||||
|
||||
def _lazy_proxy_unpickle(func, args, kwargs, *resultclasses):
    """Rebuild a lazy proxy while unpickling (see __proxy__.__reduce__)."""
    lazy_callable = lazy(func, *resultclasses)
    return lazy_callable(*args, **kwargs)
|
||||
|
||||
|
||||
def lazystr(text):
    """
    Shortcut for the common case of a lazy callable that returns str.
    """
    make_lazy_str = lazy(str, str)
    return make_lazy_str(text)
|
||||
|
||||
|
||||
def keep_lazy(*resultclasses):
    """
    A decorator that allows a function to be called with one or more lazy
    arguments. If none of the args are lazy, the function is evaluated
    immediately, otherwise a __proxy__ is returned that will evaluate the
    function when needed.
    """
    if not resultclasses:
        raise TypeError("You must pass at least one argument to keep_lazy().")

    def decorator(func):
        lazy_func = lazy(func, *resultclasses)

        @wraps(func)
        def wrapper(*args, **kwargs):
            # Stay lazy as soon as any positional or keyword arg is a Promise.
            for arg in itertools.chain(args, kwargs.values()):
                if isinstance(arg, Promise):
                    return lazy_func(*args, **kwargs)
            return func(*args, **kwargs)
        return wrapper
    return decorator
|
||||
|
||||
|
||||
def keep_lazy_text(func):
    """
    A decorator for functions that accept lazy arguments and return text.
    """
    decorate = keep_lazy(str)
    return decorate(func)
|
||||
|
||||
|
||||
# Sentinel meaning "not initialized yet"; LazyObject._wrapped holds it until
# _setup() installs the real wrapped object.
empty = object()
|
||||
|
||||
|
||||
def new_method_proxy(func):
    """
    Return a method that lazily initializes the wrapper (if needed) and then
    applies ``func`` to the wrapped object.
    """
    def inner(self, *args):
        if self._wrapped is empty:
            self._setup()
        wrapped = self._wrapped
        return func(wrapped, *args)
    return inner
|
||||
|
||||
|
||||
class LazyObject:
    """
    A wrapper for another class that can be used to delay instantiation of the
    wrapped class.

    By subclassing, you have the opportunity to intercept and alter the
    instantiation. If you don't need to do that, use SimpleLazyObject.
    """

    # Avoid infinite recursion when tracing __init__ (#19456).
    _wrapped = None

    def __init__(self):
        # Note: if a subclass overrides __init__(), it will likely need to
        # override __copy__() and __deepcopy__() as well.
        self._wrapped = empty

    # Any attribute not found on the wrapper is fetched from the wrapped
    # object (initializing it first if necessary).
    __getattr__ = new_method_proxy(getattr)

    def __setattr__(self, name, value):
        if name == "_wrapped":
            # Assign to __dict__ to avoid infinite __setattr__ loops.
            self.__dict__["_wrapped"] = value
        else:
            if self._wrapped is empty:
                self._setup()
            setattr(self._wrapped, name, value)

    def __delattr__(self, name):
        if name == "_wrapped":
            raise TypeError("can't delete _wrapped.")
        if self._wrapped is empty:
            self._setup()
        delattr(self._wrapped, name)

    def _setup(self):
        """
        Must be implemented by subclasses to initialize the wrapped object.
        """
        raise NotImplementedError('subclasses of LazyObject must provide a _setup() method')

    # Because we have messed with __class__ below, we confuse pickle as to what
    # class we are pickling. We're going to have to initialize the wrapped
    # object to successfully pickle it, so we might as well just pickle the
    # wrapped object since they're supposed to act the same way.
    #
    # Unfortunately, if we try to simply act like the wrapped object, the ruse
    # will break down when pickle gets our id(). Thus we end up with pickle
    # thinking, in effect, that we are a distinct object from the wrapped
    # object, but with the same __dict__. This can cause problems (see #25389).
    #
    # So instead, we define our own __reduce__ method and custom unpickler. We
    # pickle the wrapped object as the unpickler's argument, so that pickle
    # will pickle it normally, and then the unpickler simply returns its
    # argument.
    def __reduce__(self):
        if self._wrapped is empty:
            self._setup()
        return (unpickle_lazyobject, (self._wrapped,))

    def __copy__(self):
        if self._wrapped is empty:
            # If uninitialized, copy the wrapper. Use type(self), not
            # self.__class__, because the latter is proxied.
            return type(self)()
        else:
            # If initialized, return a copy of the wrapped object.
            return copy.copy(self._wrapped)

    def __deepcopy__(self, memo):
        if self._wrapped is empty:
            # We have to use type(self), not self.__class__, because the
            # latter is proxied.
            result = type(self)()
            memo[id(self)] = result
            return result
        return copy.deepcopy(self._wrapped, memo)

    __bytes__ = new_method_proxy(bytes)
    __str__ = new_method_proxy(str)
    __bool__ = new_method_proxy(bool)

    # Introspection support
    __dir__ = new_method_proxy(dir)

    # Need to pretend to be the wrapped class, for the sake of objects that
    # care about this (especially in equality tests)
    __class__ = property(new_method_proxy(operator.attrgetter("__class__")))
    __eq__ = new_method_proxy(operator.eq)
    __lt__ = new_method_proxy(operator.lt)
    __gt__ = new_method_proxy(operator.gt)
    __ne__ = new_method_proxy(operator.ne)
    __hash__ = new_method_proxy(hash)

    # List/Tuple/Dictionary methods support
    __getitem__ = new_method_proxy(operator.getitem)
    __setitem__ = new_method_proxy(operator.setitem)
    __delitem__ = new_method_proxy(operator.delitem)
    __iter__ = new_method_proxy(iter)
    __len__ = new_method_proxy(len)
    __contains__ = new_method_proxy(operator.contains)
|
||||
|
||||
|
||||
def unpickle_lazyobject(wrapped):
    """
    Unpickling helper for LazyObject: the pickled payload is the wrapped
    object itself, so simply hand it back.
    """
    return wrapped
|
||||
|
||||
|
||||
class SimpleLazyObject(LazyObject):
    """
    A lazy object initialized from any function.

    Designed for compound objects of unknown type. For builtins or objects of
    known type, use django.utils.functional.lazy.
    """
    def __init__(self, func):
        """
        Pass in a callable that returns the object to be wrapped.

        If copies are made of the resulting SimpleLazyObject, which can happen
        in various circumstances within Django, then you must ensure that the
        callable can be safely run more than once and will return the same
        value.
        """
        # Bypass the proxying __setattr__ by writing straight to __dict__.
        self.__dict__['_setupfunc'] = func
        super().__init__()

    def _setup(self):
        self._wrapped = self._setupfunc()

    def __repr__(self):
        # Describe the lazy object for debugging without forcing evaluation.
        target = self._setupfunc if self._wrapped is empty else self._wrapped
        return '<%s: %r>' % (type(self).__name__, target)

    def __copy__(self):
        if self._wrapped is empty:
            # If uninitialized, copy the wrapper. Use SimpleLazyObject, not
            # self.__class__, because the latter is proxied.
            return SimpleLazyObject(self._setupfunc)
        # If initialized, return a copy of the wrapped object.
        return copy.copy(self._wrapped)

    def __deepcopy__(self, memo):
        if self._wrapped is empty:
            # We have to use SimpleLazyObject, not self.__class__, because the
            # latter is proxied.
            clone = SimpleLazyObject(self._setupfunc)
            memo[id(self)] = clone
            return clone
        return copy.deepcopy(self._wrapped, memo)
|
||||
|
||||
|
||||
def partition(predicate, values):
    """
    Split *values* into two lists based on the boolean returned by
    *predicate*: (items where it is False, items where it is True). e.g.:

    >>> partition(lambda x: x > 3, range(5))
    ([0, 1, 2, 3], [4])
    """
    buckets = ([], [])
    # False indexes bucket 0, True indexes bucket 1.
    for value in values:
        buckets[predicate(value)].append(value)
    return buckets
|
||||
19
venv/lib/python3.8/site-packages/django/utils/hashable.py
Normal file
19
venv/lib/python3.8/site-packages/django/utils/hashable.py
Normal file
@@ -0,0 +1,19 @@
|
||||
from django.utils.itercompat import is_iterable
|
||||
|
||||
|
||||
def make_hashable(value):
    """
    Return a hashable representation of *value*: dicts become tuples of
    (key, hashable value) pairs, unhashable iterables become tuples of
    hashable elements, and everything else is returned as-is (or raises
    TypeError if it is neither hashable nor iterable).
    """
    if isinstance(value, dict):
        return tuple(
            (key, make_hashable(nested)) for key, nested in value.items()
        )
    # Try hash to avoid converting a hashable iterable (e.g. string,
    # frozenset) to a tuple.
    try:
        hash(value)
    except TypeError:
        if is_iterable(value):
            return tuple(make_hashable(item) for item in value)
        # Non-hashable, non-iterable.
        raise
    return value
|
||||
375
venv/lib/python3.8/site-packages/django/utils/html.py
Normal file
375
venv/lib/python3.8/site-packages/django/utils/html.py
Normal file
@@ -0,0 +1,375 @@
|
||||
"""HTML utilities suitable for global use."""
|
||||
|
||||
import html
|
||||
import json
|
||||
import re
|
||||
from html.parser import HTMLParser
|
||||
from urllib.parse import (
|
||||
parse_qsl, quote, unquote, urlencode, urlsplit, urlunsplit,
|
||||
)
|
||||
|
||||
from django.utils.encoding import punycode
|
||||
from django.utils.functional import Promise, keep_lazy, keep_lazy_text
|
||||
from django.utils.http import RFC3986_GENDELIMS, RFC3986_SUBDELIMS
|
||||
from django.utils.safestring import SafeData, SafeString, mark_safe
|
||||
from django.utils.text import normalize_newlines
|
||||
|
||||
# Configuration for urlize() function.
TRAILING_PUNCTUATION_CHARS = '.,:;!'
WRAPPING_PUNCTUATION = [('(', ')'), ('[', ']')]

# List of possible strings used for bullets in bulleted lists.
# NOTE(review): several entries here look like mis-decoded HTML entities
# (upstream uses '&middot;', '&#149;', '&bull;', '&#8226;') — verify against
# the original source before relying on these exact values.
DOTS = ['·', '*', '\u2022', '•', '•', '•']

# Ampersands not already starting a named or numeric character reference.
unencoded_ampersands_re = re.compile(r'&(?!(\w+|#\d+);)')
# Splits text on runs of whitespace/angle brackets/quotes, keeping the
# separators (capturing group) so the text can be reassembled.
word_split_re = re.compile(r'''([\s<>"']+)''')
# Matches an explicit http(s) URL (optionally bracketed IPv6 host).
simple_url_re = re.compile(r'^https?://\[?\w', re.IGNORECASE)
# Matches scheme-less candidates: www.* or bare domains with common TLDs.
simple_url_2_re = re.compile(r'^www\.|^(?!http)\w[^@]+\.(com|edu|gov|int|mil|net|org)($|/.*)$', re.IGNORECASE)
|
||||
|
||||
|
||||
@keep_lazy(str, SafeString)
def escape(text):
    """
    Return the given text with ampersands, quotes and angle brackets encoded
    for use in HTML.

    Always escape input, even if it's already escaped and marked as such.
    This may result in double-escaping. If this is a concern, use
    conditional_escape() instead.
    """
    escaped = html.escape(str(text))
    return mark_safe(escaped)
|
||||
|
||||
|
||||
# str.translate() table mapping characters that are unsafe inside JavaScript
# string literals to their \uXXXX escapes (used by escapejs()).
_js_escapes = {
    ord('\\'): '\\u005C',
    ord('\''): '\\u0027',
    ord('"'): '\\u0022',
    ord('>'): '\\u003E',
    ord('<'): '\\u003C',
    ord('&'): '\\u0026',
    ord('='): '\\u003D',
    ord('-'): '\\u002D',
    ord(';'): '\\u003B',
    ord('`'): '\\u0060',
    ord('\u2028'): '\\u2028',
    ord('\u2029'): '\\u2029'
}

# Escape every ASCII character with a value less than 32.
_js_escapes.update((ord('%c' % z), '\\u%04X' % z) for z in range(32))
|
||||
|
||||
|
||||
@keep_lazy(str, SafeString)
def escapejs(value):
    """Hex encode characters for use in JavaScript strings."""
    text = str(value)
    return mark_safe(text.translate(_js_escapes))
|
||||
|
||||
|
||||
# str.translate() table used by json_script() to replace HTML/XML-sensitive
# characters in serialized JSON with unicode escapes.
_json_script_escapes = {
    ord('>'): '\\u003E',
    ord('<'): '\\u003C',
    ord('&'): '\\u0026',
}
|
||||
|
||||
|
||||
def json_script(value, element_id):
    """
    Escape all the HTML/XML special characters with their unicode escapes, so
    value is safe to be output anywhere except for inside a tag attribute. Wrap
    the escaped JSON in a script tag.
    """
    # Imported here to avoid a circular import at module load time.
    from django.core.serializers.json import DjangoJSONEncoder
    serialized = json.dumps(value, cls=DjangoJSONEncoder)
    escaped_json = serialized.translate(_json_script_escapes)
    return format_html(
        '<script id="{}" type="application/json">{}</script>',
        element_id, mark_safe(escaped_json)
    )
|
||||
|
||||
|
||||
def conditional_escape(text):
    """
    Similar to escape(), except that it doesn't operate on pre-escaped strings.

    This function relies on the __html__ convention used both by Django's
    SafeData class and by third-party libraries like markupsafe.
    """
    if isinstance(text, Promise):
        # Force lazy strings so the __html__ check sees the real value.
        text = str(text)
    if hasattr(text, '__html__'):
        return text.__html__()
    return escape(text)
|
||||
|
||||
|
||||
def format_html(format_string, *args, **kwargs):
    """
    Similar to str.format, but pass all arguments through conditional_escape(),
    and call mark_safe() on the result. This function should be used instead
    of str.format or % interpolation to build up small HTML fragments.
    """
    safe_args = [conditional_escape(arg) for arg in args]
    safe_kwargs = {key: conditional_escape(arg) for key, arg in kwargs.items()}
    return mark_safe(format_string.format(*safe_args, **safe_kwargs))
|
||||
|
||||
|
||||
def format_html_join(sep, format_string, args_generator):
    """
    A wrapper of format_html, for the common case of a group of arguments that
    need to be formatted using the same format string, and then joined using
    'sep'. 'sep' is also passed through conditional_escape.

    'args_generator' should be an iterator that returns the sequence of 'args'
    that will be passed to format_html.

    Example:

      format_html_join('\n', "<li>{} {}</li>", ((u.first_name, u.last_name)
                                                for u in users))
    """
    joined = conditional_escape(sep).join(
        format_html(format_string, *args) for args in args_generator
    )
    return mark_safe(joined)
|
||||
|
||||
|
||||
@keep_lazy_text
def linebreaks(value, autoescape=False):
    """Convert newlines into <p> and <br>s."""
    text = str(normalize_newlines(value))
    # Two or more consecutive newlines separate paragraphs.
    paragraphs = re.split('\n{2,}', text)
    if autoescape:
        rendered = ['<p>%s</p>' % escape(p).replace('\n', '<br>') for p in paragraphs]
    else:
        rendered = ['<p>%s</p>' % p.replace('\n', '<br>') for p in paragraphs]
    return '\n\n'.join(rendered)
|
||||
|
||||
|
||||
class MLStripper(HTMLParser):
    """
    HTMLParser subclass that accumulates everything except markup: text data
    plus entity/character references (kept in their original encoded form).
    """

    def __init__(self):
        # convert_charrefs=False so entity and character references reach the
        # handle_*ref hooks instead of being decoded into text.
        super().__init__(convert_charrefs=False)
        self.reset()
        self.fed = []

    def handle_data(self, data):
        # Plain text between tags.
        self.fed.append(data)

    def handle_entityref(self, name):
        # Re-emit named references (e.g. &amp;) unchanged.
        self.fed.append('&%s;' % name)

    def handle_charref(self, name):
        # Re-emit numeric references (e.g. &#38;) unchanged.
        self.fed.append('&#%s;' % name)

    def get_data(self):
        """Return everything collected so far as one string."""
        return ''.join(self.fed)
|
||||
|
||||
|
||||
def _strip_once(value):
    """
    Run a single tag-stripping pass over `value` using MLStripper.

    Internal helper for strip_tags(); one pass may expose new tag-like
    sequences, which is why strip_tags() loops.
    """
    stripper = MLStripper()
    stripper.feed(value)
    stripper.close()
    return stripper.get_data()
|
||||
|
||||
|
||||
@keep_lazy_text
def strip_tags(value):
    """Return the given HTML with all tags stripped."""
    # In the typical case a single _strip_once() pass suffices; keep going
    # only while stripping is still removing '<' characters, since removing
    # a tag can reveal a new tag-like sequence.
    text = str(value)
    while '<' in text and '>' in text:
        stripped = _strip_once(text)
        if text.count('<') == stripped.count('<'):
            # _strip_once wasn't able to detect more tags.
            break
        text = stripped
    return text
|
||||
|
||||
|
||||
@keep_lazy_text
def strip_spaces_between_tags(value):
    """Return the given HTML with any whitespace run between tags removed."""
    return re.sub(r'>\s+<', '><', str(value))
|
||||
|
||||
|
||||
def smart_urlquote(url):
    """Quote a URL if it isn't already quoted."""
    def unquote_quote(segment):
        # Unquote first so already-quoted input isn't double-quoted, then
        # re-quote with all RFC 3986 reserved characters left intact.
        segment = unquote(segment)
        # Tilde is part of RFC3986 Unreserved Characters
        # https://tools.ietf.org/html/rfc3986#section-2.3
        # See also https://bugs.python.org/issue16285
        return quote(segment, safe=RFC3986_SUBDELIMS + RFC3986_GENDELIMS + '~')

    # Handle IDN before quoting.
    try:
        scheme, netloc, path, query, fragment = urlsplit(url)
    except ValueError:
        # invalid IPv6 URL (normally square brackets in hostname part).
        return unquote_quote(url)

    try:
        netloc = punycode(netloc)  # IDN -> ACE
    except UnicodeError:  # invalid domain part
        return unquote_quote(url)

    if query:
        # Separately unquoting key/value, so as to not mix querystring separators
        # included in query values. See #22267.
        query_parts = [(unquote(q[0]), unquote(q[1]))
                       for q in parse_qsl(query, keep_blank_values=True)]
        # urlencode will take care of quoting
        query = urlencode(query_parts)

    # Path and fragment are re-quoted segment-wise; netloc was already
    # converted to ASCII-compatible encoding above.
    path = unquote_quote(path)
    fragment = unquote_quote(fragment)

    return urlunsplit((scheme, netloc, path, query, fragment))
|
||||
|
||||
|
||||
@keep_lazy_text
def urlize(text, trim_url_limit=None, nofollow=False, autoescape=False):
    """
    Convert any URLs in text into clickable links.

    Works on http://, https://, www. links, and also on links ending in one of
    the original seven gTLDs (.com, .edu, .gov, .int, .mil, .net, and .org).
    Links can have trailing punctuation (periods, commas, close-parens) and
    leading punctuation (opening parens) and it'll still do the right thing.

    If trim_url_limit is not None, truncate the URLs in the link text longer
    than this limit to trim_url_limit - 1 characters and append an ellipsis.

    If nofollow is True, give the links a rel="nofollow" attribute.

    If autoescape is True, autoescape the link text and URLs.
    """
    safe_input = isinstance(text, SafeData)

    def trim_url(x, limit=trim_url_limit):
        # Shorten the *displayed* link text (not the href) to `limit` chars.
        if limit is None or len(x) <= limit:
            return x
        return '%s…' % x[:max(0, limit - 1)]

    def trim_punctuation(lead, middle, trail):
        """
        Trim trailing and wrapping punctuation from `middle`. Return the items
        of the new state.
        """
        # Continue trimming until middle remains unchanged.
        trimmed_something = True
        while trimmed_something:
            trimmed_something = False
            # Trim wrapping punctuation.
            for opening, closing in WRAPPING_PUNCTUATION:
                if middle.startswith(opening):
                    middle = middle[len(opening):]
                    lead += opening
                    trimmed_something = True
                # Keep parentheses at the end only if they're balanced.
                if (middle.endswith(closing) and
                        middle.count(closing) == middle.count(opening) + 1):
                    middle = middle[:-len(closing)]
                    trail = closing + trail
                    trimmed_something = True
            # Trim trailing punctuation (after trimming wrapping punctuation,
            # as encoded entities contain ';'). Unescape entities to avoid
            # breaking them by removing ';'.
            middle_unescaped = html.unescape(middle)
            stripped = middle_unescaped.rstrip(TRAILING_PUNCTUATION_CHARS)
            if middle_unescaped != stripped:
                # Move the trimmed suffix into `trail`; the slice arithmetic
                # works on the still-escaped `middle` using the length delta
                # of the unescaped text.
                trail = middle[len(stripped):] + trail
                middle = middle[:len(stripped) - len(middle_unescaped)]
                trimmed_something = True
        return lead, middle, trail

    def is_email_simple(value):
        """Return True if value looks like an email address."""
        # An @ must be in the middle of the value.
        if '@' not in value or value.startswith('@') or value.endswith('@'):
            return False
        try:
            p1, p2 = value.split('@')
        except ValueError:
            # value contains more than one @.
            return False
        # Dot must be in p2 (e.g. example.com)
        if '.' not in p2 or p2.startswith('.'):
            return False
        return True

    words = word_split_re.split(str(text))
    for i, word in enumerate(words):
        if '.' in word or '@' in word or ':' in word:
            # lead: Current punctuation trimmed from the beginning of the word.
            # middle: Current state of the word.
            # trail: Current punctuation trimmed from the end of the word.
            lead, middle, trail = '', word, ''
            # Deal with punctuation.
            lead, middle, trail = trim_punctuation(lead, middle, trail)

            # Make URL we want to point to.
            url = None
            nofollow_attr = ' rel="nofollow"' if nofollow else ''
            if simple_url_re.match(middle):
                url = smart_urlquote(html.unescape(middle))
            elif simple_url_2_re.match(middle):
                url = smart_urlquote('http://%s' % html.unescape(middle))
            elif ':' not in middle and is_email_simple(middle):
                local, domain = middle.rsplit('@', 1)
                try:
                    domain = punycode(domain)
                except UnicodeError:
                    continue
                url = 'mailto:%s@%s' % (local, domain)
                nofollow_attr = ''

            # Make link.
            if url:
                trimmed = trim_url(middle)
                if autoescape and not safe_input:
                    lead, trail = escape(lead), escape(trail)
                    trimmed = escape(trimmed)
                middle = '<a href="%s"%s>%s</a>' % (escape(url), nofollow_attr, trimmed)
                words[i] = mark_safe('%s%s%s' % (lead, middle, trail))
            else:
                if safe_input:
                    words[i] = mark_safe(word)
                elif autoescape:
                    words[i] = escape(word)
        elif safe_input:
            words[i] = mark_safe(word)
        elif autoescape:
            words[i] = escape(word)
    return ''.join(words)
|
||||
|
||||
|
||||
def avoid_wrapping(value):
    """
    Keep a phrase on one line by replacing every ordinary space with a
    non-breaking space (U+00A0), so browsers won't wrap in the middle of it.
    """
    return value.replace(" ", "\xa0")
|
||||
|
||||
|
||||
def html_safe(klass):
    """
    A decorator that defines the __html__ method. This helps non-Django
    templates to detect classes whose __str__ methods return SafeString.

    Raise ValueError if the class already defines __html__, or doesn't
    define __str__ itself.
    """
    if '__html__' in klass.__dict__:
        raise ValueError(
            "can't apply @html_safe to %s because it defines "
            "__html__()." % klass.__name__
        )
    if '__str__' not in klass.__dict__:
        raise ValueError(
            "can't apply @html_safe to %s because it doesn't "
            "define __str__()." % klass.__name__
        )
    # Wrap the original __str__ so its result is marked safe, and expose the
    # __html__ convention used by third-party template engines.
    original_str = klass.__str__
    klass.__str__ = lambda self: mark_safe(original_str(self))
    klass.__html__ = lambda self: str(self)
    return klass
|
||||
478
venv/lib/python3.8/site-packages/django/utils/http.py
Normal file
478
venv/lib/python3.8/site-packages/django/utils/http.py
Normal file
@@ -0,0 +1,478 @@
|
||||
import base64
|
||||
import calendar
|
||||
import datetime
|
||||
import re
|
||||
import unicodedata
|
||||
import warnings
|
||||
from binascii import Error as BinasciiError
|
||||
from email.utils import formatdate
|
||||
from urllib.parse import (
|
||||
ParseResult, SplitResult, _coerce_args, _splitnetloc, _splitparams, quote,
|
||||
quote_plus, scheme_chars, unquote, unquote_plus,
|
||||
urlencode as original_urlencode, uses_params,
|
||||
)
|
||||
|
||||
from django.core.exceptions import TooManyFieldsSent
|
||||
from django.utils.datastructures import MultiValueDict
|
||||
from django.utils.deprecation import RemovedInDjango40Warning
|
||||
from django.utils.functional import keep_lazy_text
|
||||
|
||||
# based on RFC 7232, Appendix C
ETAG_MATCH = re.compile(r'''
    \A(      # start of string and capture group
    (?:W/)?  # optional weak indicator
    "        # opening quote
    [^"]*    # any sequence of non-quote characters
    "        # end quote
    )\Z      # end of string and capture group
''', re.X)

# Regex fragments for the three HTTP date formats of RFC 7231 section 7.1.1.1.
MONTHS = 'jan feb mar apr may jun jul aug sep oct nov dec'.split()
__D = r'(?P<day>\d{2})'
__D2 = r'(?P<day>[ \d]\d)'
__M = r'(?P<mon>\w{3})'
__Y = r'(?P<year>\d{4})'
__Y2 = r'(?P<year>\d{2})'
__T = r'(?P<hour>\d{2}):(?P<min>\d{2}):(?P<sec>\d{2})'
RFC1123_DATE = re.compile(r'^\w{3}, %s %s %s %s GMT$' % (__D, __M, __Y, __T))
RFC850_DATE = re.compile(r'^\w{6,9}, %s-%s-%s %s GMT$' % (__D, __M, __Y2, __T))
ASCTIME_DATE = re.compile(r'^\w{3} %s %s %s %s$' % (__M, __D2, __T, __Y))

# Reserved character classes from RFC 3986 section 2.2.
RFC3986_GENDELIMS = ":/?#[]@"
RFC3986_SUBDELIMS = "!$&'()*+,;="

# Query-string field separators ('&' or ';') used by limited_parse_qsl().
FIELDS_MATCH = re.compile('[&;]')
|
||||
|
||||
|
||||
@keep_lazy_text
def urlquote(url, safe='/'):
    """
    A legacy compatibility wrapper to Python's urllib.parse.quote() function.
    (was used for unicode handling on Python 2)

    Deprecated: call urllib.parse.quote() directly.
    """
    message = (
        'django.utils.http.urlquote() is deprecated in favor of '
        'urllib.parse.quote().'
    )
    warnings.warn(message, RemovedInDjango40Warning, stacklevel=2)
    return quote(url, safe)
|
||||
|
||||
|
||||
@keep_lazy_text
def urlquote_plus(url, safe=''):
    """
    A legacy compatibility wrapper to Python's urllib.parse.quote_plus()
    function. (was used for unicode handling on Python 2)

    Deprecated: call urllib.parse.quote_plus() directly.
    """
    warnings.warn(
        'django.utils.http.urlquote_plus() is deprecated in favor of '
        # Fix: the message previously ended with a stray ',' instead of '.'.
        'urllib.parse.quote_plus().',
        RemovedInDjango40Warning, stacklevel=2,
    )
    return quote_plus(url, safe)
|
||||
|
||||
|
||||
@keep_lazy_text
def urlunquote(quoted_url):
    """
    A legacy compatibility wrapper to Python's urllib.parse.unquote() function.
    (was used for unicode handling on Python 2)

    Deprecated: call urllib.parse.unquote() directly.
    """
    message = (
        'django.utils.http.urlunquote() is deprecated in favor of '
        'urllib.parse.unquote().'
    )
    warnings.warn(message, RemovedInDjango40Warning, stacklevel=2)
    return unquote(quoted_url)
|
||||
|
||||
|
||||
@keep_lazy_text
def urlunquote_plus(quoted_url):
    """
    A legacy compatibility wrapper to Python's urllib.parse.unquote_plus()
    function. (was used for unicode handling on Python 2)

    Deprecated: call urllib.parse.unquote_plus() directly.
    """
    message = (
        'django.utils.http.urlunquote_plus() is deprecated in favor of '
        'urllib.parse.unquote_plus().'
    )
    warnings.warn(message, RemovedInDjango40Warning, stacklevel=2)
    return unquote_plus(quoted_url)
|
||||
|
||||
|
||||
def urlencode(query, doseq=False):
    """
    A version of Python's urllib.parse.urlencode() function that can operate on
    MultiValueDict and non-string values.
    """
    # Normalize the input to an iterable of (key, value) pairs.
    if isinstance(query, MultiValueDict):
        query = query.lists()
    elif hasattr(query, 'items'):
        query = query.items()
    query_params = []
    for key, value in query:
        if value is None:
            # None cannot be meaningfully encoded; force callers to be explicit.
            raise TypeError(
                "Cannot encode None for key '%s' in a query string. Did you "
                "mean to pass an empty string or omit the value?" % key
            )
        elif not doseq or isinstance(value, (str, bytes)):
            # Without doseq (or for strings/bytes), pass the value through.
            query_val = value
        else:
            try:
                itr = iter(value)
            except TypeError:
                # Non-iterable scalar: pass through unchanged.
                query_val = value
            else:
                # Consume generators and iterators, when doseq=True, to
                # work around https://bugs.python.org/issue31706.
                query_val = []
                for item in itr:
                    if item is None:
                        raise TypeError(
                            "Cannot encode None for key '%s' in a query "
                            "string. Did you mean to pass an empty string or "
                            "omit the value?" % key
                        )
                    elif not isinstance(item, bytes):
                        item = str(item)
                    query_val.append(item)
        query_params.append((key, query_val))
    return original_urlencode(query_params, doseq)
|
||||
|
||||
|
||||
def http_date(epoch_seconds=None):
    """
    Format a timestamp as an RFC 1123 date per HTTP RFC 7231 section 7.1.1.1,
    e.g. 'Wdy, DD Mon YYYY HH:MM:SS GMT'.

    `epoch_seconds` is a floating point number of seconds since the epoch in
    UTC, such as time.time() returns; None means "now".
    """
    return formatdate(epoch_seconds, usegmt=True)
|
||||
|
||||
|
||||
def parse_http_date(date):
    """
    Parse a date format as specified by HTTP RFC7231 section 7.1.1.1.

    The three formats allowed by the RFC are accepted, even if only the first
    one is still in widespread use.

    Return an integer expressed in seconds since the epoch, in UTC.

    Raise ValueError if `date` matches none of the formats or names an
    impossible date.
    """
    # email.utils.parsedate() does the job for RFC1123 dates; unfortunately
    # RFC7231 makes it mandatory to support RFC850 dates too. So we roll
    # our own RFC-compliant parsing.
    for regex in RFC1123_DATE, RFC850_DATE, ASCTIME_DATE:
        m = regex.match(date)
        if m is not None:
            break
    else:
        raise ValueError("%r is not in a valid HTTP date format" % date)
    try:
        year = int(m.group('year'))
        if year < 100:
            # RFC850 carries a two-digit year; resolve the century relative
            # to the current year per RFC 7231's guidance.
            current_year = datetime.datetime.utcnow().year
            current_century = current_year - (current_year % 100)
            if year - (current_year % 100) > 50:
                # year that appears to be more than 50 years in the future are
                # interpreted as representing the past.
                year += current_century - 100
            else:
                year += current_century
        month = MONTHS.index(m.group('mon').lower()) + 1
        day = int(m.group('day'))
        hour = int(m.group('hour'))
        min = int(m.group('min'))
        sec = int(m.group('sec'))
        result = datetime.datetime(year, month, day, hour, min, sec)
        return calendar.timegm(result.utctimetuple())
    except Exception as exc:
        raise ValueError("%r is not a valid date" % date) from exc
|
||||
|
||||
|
||||
def parse_http_date_safe(date):
    """
    Same as parse_http_date, but return None if the input is invalid.
    """
    try:
        return parse_http_date(date)
    except Exception:
        # Best-effort parsing: any failure yields None rather than an error.
        return None
|
||||
|
||||
|
||||
# Base 36 functions: useful for generating compact URLs
|
||||
|
||||
def base36_to_int(s):
    """
    Convert a base 36 string to an int. Raise ValueError if the input won't fit
    into an int.
    """
    # 13 base36 digits are enough to encode any 64-bit integer; rejecting
    # anything longer prevents huge-int parsing from being used to consume
    # server resources.
    if len(s) > 13:
        raise ValueError("Base36 input too large")
    return int(s, 36)
|
||||
|
||||
|
||||
def int_to_base36(i):
    """Convert a non-negative integer to its base 36 string representation."""
    digits = '0123456789abcdefghijklmnopqrstuvwxyz'
    if i < 0:
        raise ValueError("Negative base36 conversion input.")
    if i < 36:
        # Single-digit fast path.
        return digits[i]
    encoded = []
    while i != 0:
        i, remainder = divmod(i, 36)
        encoded.append(digits[remainder])
    # Digits were produced least-significant first.
    return ''.join(reversed(encoded))
|
||||
|
||||
|
||||
def urlsafe_base64_encode(s):
    """
    Encode a bytestring to a URL-safe base64 str, with any trailing padding
    ('=') removed.
    """
    encoded = base64.urlsafe_b64encode(s)
    return encoded.rstrip(b'\n=').decode('ascii')
|
||||
|
||||
|
||||
def urlsafe_base64_decode(s):
    """
    Decode a URL-safe base64 encoded string, re-adding any trailing '='
    padding that urlsafe_base64_encode() stripped. Raise ValueError on
    invalid input.
    """
    data = s.encode()
    try:
        padded = data.ljust(len(data) + len(data) % 4, b'=')
        return base64.urlsafe_b64decode(padded)
    except (LookupError, BinasciiError) as e:
        raise ValueError(e)
|
||||
|
||||
|
||||
def parse_etags(etag_str):
    """
    Parse a string of ETags given in an If-None-Match or If-Match header as
    defined by RFC 7232. Return a list of quoted ETags, or ['*'] if all ETags
    should be matched.
    """
    if etag_str.strip() == '*':
        return ['*']
    # Parse each ETag individually, and return any that are valid.
    candidates = (ETAG_MATCH.match(etag.strip()) for etag in etag_str.split(','))
    return [match.group(1) for match in candidates if match]
|
||||
|
||||
|
||||
def quote_etag(etag_str):
    """
    If the provided string is already a quoted ETag, return it. Otherwise, wrap
    the string in quotes, making it a strong ETag.
    """
    if ETAG_MATCH.match(etag_str):
        return etag_str
    return '"%s"' % etag_str
|
||||
|
||||
|
||||
def is_same_domain(host, pattern):
    """
    Return ``True`` if the host is either an exact match or a match
    to the wildcard pattern.

    Any pattern beginning with a period matches a domain and all of its
    subdomains. (e.g. ``.example.com`` matches ``example.com`` and
    ``foo.example.com``). Anything else is an exact string match.
    """
    if not pattern:
        return False

    pattern = pattern.lower()
    if pattern[0] == '.':
        # Wildcard: accept the bare domain itself or any subdomain of it.
        return host.endswith(pattern) or host == pattern[1:]
    return pattern == host
|
||||
|
||||
|
||||
def url_has_allowed_host_and_scheme(url, allowed_hosts, require_https=False):
    """
    Return ``True`` if the url uses an allowed host and a safe scheme.

    Always return ``False`` on an empty url.

    If ``require_https`` is ``True``, only 'https' will be considered a valid
    scheme, as opposed to 'http' and 'https' with the default, ``False``.

    Note: "True" doesn't entail that a URL is "safe". It may still be e.g.
    quoted incorrectly. Ensure to also use django.utils.encoding.iri_to_uri()
    on the path component of untrusted URLs.
    """
    if url is not None:
        url = url.strip()
    if not url:
        return False
    if allowed_hosts is None:
        allowed_hosts = set()
    elif isinstance(allowed_hosts, str):
        allowed_hosts = {allowed_hosts}
    # Chrome treats \ completely as / in paths but it could be part of some
    # basic auth credentials so we need to check both URLs.
    return all(
        _url_has_allowed_host_and_scheme(candidate, allowed_hosts, require_https=require_https)
        for candidate in (url, url.replace('\\', '/'))
    )
|
||||
|
||||
|
||||
def is_safe_url(url, allowed_hosts, require_https=False):
    """Deprecated alias of url_has_allowed_host_and_scheme()."""
    message = (
        'django.utils.http.is_safe_url() is deprecated in favor of '
        'url_has_allowed_host_and_scheme().'
    )
    warnings.warn(message, RemovedInDjango40Warning, stacklevel=2)
    return url_has_allowed_host_and_scheme(url, allowed_hosts, require_https)
|
||||
|
||||
|
||||
# Copied from urllib.parse.urlparse() but uses fixed urlsplit() function.
def _urlparse(url, scheme='', allow_fragments=True):
    """Parse a URL into 6 components:
    <scheme>://<netloc>/<path>;<params>?<query>#<fragment>
    Return a 6-tuple: (scheme, netloc, path, params, query, fragment).
    Note that we don't break the components up in smaller bits
    (e.g. netloc is a single string) and we don't expand % escapes."""
    url, scheme, _coerce_result = _coerce_args(url, scheme)
    # Delegate to the patched _urlsplit() below, then split path params off.
    splitresult = _urlsplit(url, scheme, allow_fragments)
    scheme, netloc, url, query, fragment = splitresult
    if scheme in uses_params and ';' in url:
        url, params = _splitparams(url)
    else:
        params = ''
    result = ParseResult(scheme, netloc, url, params, query, fragment)
    return _coerce_result(result)
|
||||
|
||||
|
||||
# Copied from urllib.parse.urlsplit() with
# https://github.com/python/cpython/pull/661 applied.
def _urlsplit(url, scheme='', allow_fragments=True):
    """Parse a URL into 5 components:
    <scheme>://<netloc>/<path>?<query>#<fragment>
    Return a 5-tuple: (scheme, netloc, path, query, fragment).
    Note that we don't break the components up in smaller bits
    (e.g. netloc is a single string) and we don't expand % escapes."""
    url, scheme, _coerce_result = _coerce_args(url, scheme)
    netloc = query = fragment = ''
    i = url.find(':')
    if i > 0:
        # Only treat the prefix as a scheme if every character before the
        # ':' is a valid scheme character.
        for c in url[:i]:
            if c not in scheme_chars:
                break
        else:
            scheme, url = url[:i].lower(), url[i + 1:]

    if url[:2] == '//':
        netloc, url = _splitnetloc(url, 2)
        # Square brackets (IPv6 literals) must be balanced.
        if (('[' in netloc and ']' not in netloc) or
                (']' in netloc and '[' not in netloc)):
            raise ValueError("Invalid IPv6 URL")
    if allow_fragments and '#' in url:
        url, fragment = url.split('#', 1)
    if '?' in url:
        url, query = url.split('?', 1)
    v = SplitResult(scheme, netloc, url, query, fragment)
    return _coerce_result(v)
|
||||
|
||||
|
||||
def _url_has_allowed_host_and_scheme(url, allowed_hosts, require_https=False):
    """
    Single-URL safety check backing url_has_allowed_host_and_scheme().
    Security-critical: each branch rejects a known browser quirk that could
    enable an open redirect.
    """
    # Chrome considers any URL with more than two slashes to be absolute, but
    # urlparse is not so flexible. Treat any url with three slashes as unsafe.
    if url.startswith('///'):
        return False
    try:
        url_info = _urlparse(url)
    except ValueError:  # e.g. invalid IPv6 addresses
        return False
    # Forbid URLs like http:///example.com - with a scheme, but without a hostname.
    # In that URL, example.com is not the hostname but, a path component. However,
    # Chrome will still consider example.com to be the hostname, so we must not
    # allow this syntax.
    if not url_info.netloc and url_info.scheme:
        return False
    # Forbid URLs that start with control characters. Some browsers (like
    # Chrome) ignore quite a few control characters at the start of a
    # URL and might consider the URL as scheme relative.
    if unicodedata.category(url[0])[0] == 'C':
        return False
    scheme = url_info.scheme
    # Consider URLs without a scheme (e.g. //example.com/p) to be http.
    if not url_info.scheme and url_info.netloc:
        scheme = 'http'
    valid_schemes = ['https'] if require_https else ['http', 'https']
    return ((not url_info.netloc or url_info.netloc in allowed_hosts) and
            (not scheme or scheme in valid_schemes))
|
||||
|
||||
|
||||
def limited_parse_qsl(qs, keep_blank_values=False, encoding='utf-8',
                      errors='replace', fields_limit=None):
    """
    Return a list of key/value tuples parsed from query string.

    Copied from urlparse with an additional "fields_limit" argument.
    Copyright (C) 2013 Python Software Foundation (see LICENSE.python).

    Arguments:

    qs: percent-encoded query string to be parsed

    keep_blank_values: flag indicating whether blank values in
        percent-encoded queries should be treated as blank strings. A
        true value indicates that blanks should be retained as blank
        strings. The default false value indicates that blank values
        are to be ignored and treated as if they were not included.

    encoding and errors: specify how to decode percent-encoded sequences
        into Unicode characters, as accepted by the bytes.decode() method.

    fields_limit: maximum number of fields parsed or an exception
        is raised. None means no limit and is the default.
    """
    if fields_limit:
        # Split at most fields_limit times; one extra element means the
        # input exceeded the limit.
        pairs = FIELDS_MATCH.split(qs, fields_limit)
        if len(pairs) > fields_limit:
            raise TooManyFieldsSent(
                'The number of GET/POST parameters exceeded '
                'settings.DATA_UPLOAD_MAX_NUMBER_FIELDS.'
            )
    else:
        pairs = FIELDS_MATCH.split(qs)
    r = []
    for name_value in pairs:
        if not name_value:
            continue
        nv = name_value.split('=', 1)
        if len(nv) != 2:
            # Handle case of a control-name with no equal sign
            if keep_blank_values:
                nv.append('')
            else:
                continue
        if nv[1] or keep_blank_values:
            # '+' means space in application/x-www-form-urlencoded data;
            # decode it before percent-unquoting.
            name = nv[0].replace('+', ' ')
            name = unquote(name, encoding=encoding, errors=errors)
            value = nv[1].replace('+', ' ')
            value = unquote(value, encoding=encoding, errors=errors)
            r.append((name, value))
    return r
|
||||
|
||||
|
||||
def escape_leading_slashes(url):
    """
    If redirecting to an absolute path (two leading slashes), a slash must be
    escaped to prevent browsers from handling the path as schemaless and
    redirecting to another host.
    """
    if not url.startswith('//'):
        return url
    return '/%2F{}'.format(url[2:])
|
||||
63
venv/lib/python3.8/site-packages/django/utils/inspect.py
Normal file
63
venv/lib/python3.8/site-packages/django/utils/inspect.py
Normal file
@@ -0,0 +1,63 @@
|
||||
import inspect
|
||||
|
||||
|
||||
def get_func_args(func):
    """Return the names of func's plain positional-or-keyword parameters."""
    parameters = inspect.signature(func).parameters
    return [
        name for name, param in parameters.items()
        if param.kind == inspect.Parameter.POSITIONAL_OR_KEYWORD
    ]
|
||||
|
||||
|
||||
def get_func_full_args(func):
    """
    Return a list of (argument name, default value) tuples. If the argument
    does not have a default value, omit it in the tuple. Arguments such as
    *args and **kwargs are also included.
    """
    sig = inspect.signature(func)
    collected = []
    for name, param in sig.parameters.items():
        # Ignore 'self'
        if name == 'self':
            continue
        # Prefix variadic parameters with their star markers.
        if param.kind == inspect.Parameter.VAR_POSITIONAL:
            display = '*' + name
        elif param.kind == inspect.Parameter.VAR_KEYWORD:
            display = '**' + name
        else:
            display = name
        if param.default != inspect.Parameter.empty:
            collected.append((display, param.default))
        else:
            collected.append((display,))
    return collected
|
||||
|
||||
|
||||
def func_accepts_kwargs(func):
    """Return True if `func` accepts arbitrary keyword arguments (**kwargs)."""
    params = inspect.signature(func).parameters.values()
    return any(param.kind == inspect.Parameter.VAR_KEYWORD for param in params)
|
||||
|
||||
|
||||
def func_accepts_var_args(func):
    """
    Return True if function 'func' accepts positional arguments *args.
    """
    params = inspect.signature(func).parameters.values()
    return any(param.kind == inspect.Parameter.VAR_POSITIONAL for param in params)
|
||||
|
||||
|
||||
def method_has_no_args(meth):
    """Return True if a method only accepts 'self'."""
    # Bound methods hide 'self' from the signature, so the expected count
    # differs depending on whether `meth` is bound.
    count = sum(
        1 for param in inspect.signature(meth).parameters.values()
        if param.kind == param.POSITIONAL_OR_KEYWORD
    )
    return count == 0 if inspect.ismethod(meth) else count == 1
|
||||
|
||||
|
||||
def func_supports_parameter(func, parameter):
    """Return True if `func` has a parameter named `parameter`."""
    return parameter in inspect.signature(func).parameters
|
||||
46
venv/lib/python3.8/site-packages/django/utils/ipv6.py
Normal file
46
venv/lib/python3.8/site-packages/django/utils/ipv6.py
Normal file
@@ -0,0 +1,46 @@
|
||||
import ipaddress
|
||||
|
||||
from django.core.exceptions import ValidationError
|
||||
from django.utils.translation import gettext_lazy as _
|
||||
|
||||
|
||||
def clean_ipv6_address(ip_str, unpack_ipv4=False,
                       error_message=_("This is not a valid IPv6 address.")):
    """
    Clean an IPv6 address string.

    Raise ValidationError if the address is invalid.

    Replace the longest continuous zero-sequence with "::", remove leading
    zeroes, and make sure all hextets are lowercase.

    Args:
        ip_str: A valid IPv6 address.
        unpack_ipv4: if an IPv4-mapped address is found,
            return the plain IPv4 address (default=False).
        error_message: An error message used in the ValidationError.

    Return a compressed IPv6 address or the same value.
    """
    # Round-tripping through the integer form yields the canonical,
    # compressed, lowercase representation.
    try:
        addr = ipaddress.IPv6Address(int(ipaddress.IPv6Address(ip_str)))
    except ValueError:
        raise ValidationError(error_message, code='invalid')

    mapped = addr.ipv4_mapped
    if mapped:
        # IPv4-mapped address: either return the plain IPv4 form, or keep
        # the canonical '::ffff:' prefix.
        if unpack_ipv4:
            return str(mapped)
        return '::ffff:%s' % str(mapped)

    return str(addr)
|
||||
|
||||
|
||||
def is_valid_ipv6_address(ip_str):
    """
    Return whether or not the `ip_str` string is a valid IPv6 address.
    """
    try:
        ipaddress.IPv6Address(ip_str)
    except ValueError:
        return False
    else:
        return True
|
||||
@@ -0,0 +1,8 @@
|
||||
def is_iterable(x):
    """Return True if `x` is iterable, in an implementation-independent way."""
    # EAFP: simply attempt to obtain an iterator.
    try:
        iter(x)
    except TypeError:
        return False
    return True
|
||||
220
venv/lib/python3.8/site-packages/django/utils/jslex.py
Normal file
220
venv/lib/python3.8/site-packages/django/utils/jslex.py
Normal file
@@ -0,0 +1,220 @@
|
||||
"""JsLex: a lexer for Javascript"""
|
||||
# Originally from https://bitbucket.org/ned/jslex
|
||||
import re
|
||||
|
||||
|
||||
class Tok:
    """
    A specification for a token class: a name, the regex that matches it,
    and optionally the lexer state to switch to after it matches.
    """
    # Class-wide counter used to hand out a unique id per Tok instance.
    num = 0

    def __init__(self, name, regex, next=None):
        self.name = name
        self.regex = regex
        self.next = next
        self.id = Tok.num
        Tok.num += 1
|
||||
|
||||
|
||||
def literals(choices, prefix="", suffix=""):
    """
    Create a regex from a space-separated list of literal `choices`.

    If provided, `prefix` and `suffix` will be attached to each choice
    individually.
    """
    alternatives = [prefix + re.escape(choice) + suffix for choice in choices.split()]
    return "|".join(alternatives)
|
||||
|
||||
|
||||
class Lexer:
    """
    A generic multi-state regex-based lexer.

    `states` maps a state name to a list of Tok rules; each state is compiled
    into one alternation regex whose named groups identify the matching Tok.
    """

    def __init__(self, states, first):
        self.regexes = {}
        self.toks = {}

        for state, rules in states.items():
            parts = []
            for tok in rules:
                # Group names must be unique per compiled regex; derive them
                # from each Tok's globally unique id.
                groupid = "t%d" % tok.id
                self.toks[groupid] = tok
                parts.append("(?P<%s>%s)" % (groupid, tok.regex))
            self.regexes[state] = re.compile("|".join(parts), re.MULTILINE | re.VERBOSE)

        # Current state; persists across lex() calls.
        self.state = first

    def lex(self, text):
        """
        Lexically analyze `text`.

        Yield pairs (`name`, `tokentext`).
        """
        end = len(text)
        state = self.state
        regexes = self.regexes
        toks = self.toks
        start = 0

        while start < end:
            for match in regexes[state].finditer(text, start):
                # lastgroup is the named group that matched, i.e. the token.
                name = match.lastgroup
                tok = toks[name]
                toktext = match.group(name)
                start += len(toktext)
                yield (tok.name, toktext)

                if tok.next:
                    # Token triggers a state change: restart matching with
                    # the new state's regex from the current position.
                    state = tok.next
                    break

        self.state = state
|
||||
|
||||
|
||||
class JsLexer(Lexer):
    """
    A Javascript lexer

    >>> lexer = JsLexer()
    >>> list(lexer.lex("a = 1"))
    [('id', 'a'), ('ws', ' '), ('punct', '='), ('ws', ' '), ('dnum', '1')]

    This doesn't properly handle non-ASCII characters in the Javascript source.
    """

    # Because these tokens are matched as alternatives in a regex, longer
    # possibilities must appear in the list before shorter ones, for example,
    # '>>' before '>'.
    #
    # Note that we don't have to detect malformed Javascript, only properly
    # lex correct Javascript, so much of this is simplified.

    # Details of Javascript lexical structure are taken from
    # http://www.ecma-international.org/publications/files/ECMA-ST/ECMA-262.pdf

    # A useful explanation of automatic semicolon insertion is at
    # http://inimino.org/~inimino/blog/javascript_semicolons

    both_before = [
        Tok("comment", r"/\*(.|\n)*?\*/"),
        Tok("linecomment", r"//.*?$"),
        Tok("ws", r"\s+"),
        Tok("keyword", literals("""
                                break case catch class const continue debugger
                                default delete do else enum export extends
                                finally for function if import in instanceof
                                new return super switch this throw try typeof
                                var void while with
                                """, suffix=r"\b"), next='reg'),
        Tok("reserved", literals("null true false", suffix=r"\b"), next='div'),
        # FIX: a \uXXXX escape consists of four *hex* digits, so the
        # first-char class now uses [0-9a-fA-F] (was [0-9a-fA-Z]), matching
        # the rest-chars class on the next line.
        # NOTE(review): the space inside [a-zA-Z_$ ] looks unintentional, but
        # it is unreachable (the earlier "ws" token always wins on
        # whitespace), so it is left as-is.
        Tok("id", r"""
                  ([a-zA-Z_$ ]|\\u[0-9a-fA-F]{4})     # first char
                  ([a-zA-Z_$0-9]|\\u[0-9a-fA-F]{4})*  # rest chars
                  """, next='div'),
        Tok("hnum", r"0[xX][0-9a-fA-F]+", next='div'),
        Tok("onum", r"0[0-7]+"),
        Tok("dnum", r"""
                    ( (0|[1-9][0-9]*)       # DecimalIntegerLiteral
                      \.                    # dot
                      [0-9]*                # DecimalDigits-opt
                      ([eE][-+]?[0-9]+)?    # ExponentPart-opt
                    |
                      \.                    # dot
                      [0-9]+                # DecimalDigits
                      ([eE][-+]?[0-9]+)?    # ExponentPart-opt
                    |
                      (0|[1-9][0-9]*)       # DecimalIntegerLiteral
                      ([eE][-+]?[0-9]+)?    # ExponentPart-opt
                    )
                    """, next='div'),
        Tok("punct", literals("""
                              >>>= === !== >>> <<= >>= <= >= == != << >> &&
                              || += -= *= %= &= |= ^=
                              """), next="reg"),
        Tok("punct", literals("++ -- ) ]"), next='div'),
        Tok("punct", literals("{ } ( [ . ; , < > + - * % & | ^ ! ~ ? : ="), next='reg'),
        Tok("string", r'"([^"\\]|(\\(.|\n)))*?"', next='div'),
        Tok("string", r"'([^'\\]|(\\(.|\n)))*?'", next='div'),
    ]

    both_after = [
        Tok("other", r"."),
    ]

    # Two states distinguish how a "/" should be read: after a value it is
    # division; after an operator or keyword it starts a regex literal.
    states = {
        # slash will mean division
        'div': both_before + [
            Tok("punct", literals("/= /"), next='reg'),
        ] + both_after,

        # slash will mean regex
        'reg': both_before + [
            Tok("regex",
                r"""
                    /                       # opening slash
                    # First character is..
                    ( [^*\\/[]              # anything but * \ / or [
                    | \\.                   # or an escape sequence
                    | \[                    # or a class, which has
                        ( [^\]\\]           # anything but \ or ]
                        | \\.               # or an escape sequence
                        )*                  # many times
                      \]
                    )
                    # Following characters are same, except for excluding a star
                    ( [^\\/[]               # anything but \ / or [
                    | \\.                   # or an escape sequence
                    | \[                    # or a class, which has
                        ( [^\]\\]           # anything but \ or ]
                        | \\.               # or an escape sequence
                        )*                  # many times
                      \]
                    )*                      # many times
                    /                       # closing slash
                    [a-zA-Z0-9]*            # trailing flags
                """, next='div'),
        ] + both_after,
    }

    def __init__(self):
        # Start in 'reg' state: at the beginning of a program a "/" can only
        # introduce a regex literal, never division.
        super().__init__(self.states, 'reg')
|
||||
|
||||
|
||||
def prepare_js_for_gettext(js):
    """
    Convert the Javascript source `js` into something resembling C for
    xgettext.

    What actually happens is that all the regex literals are replaced with
    "REGEX".
    """
    def escape_quotes(match):
        """Regex callback: backslash-escape a double quote, pass others through."""
        text = match.group(0)
        return r'\"' if text == '"' else text

    lexer = JsLexer()
    pieces = []
    for name, tok in lexer.lex(js):
        if name == 'regex':
            # C doesn't grok regexes, and they aren't needed for gettext,
            # so just output a string instead.
            tok = '"REGEX"'
        elif name == 'string':
            # C doesn't have single-quoted strings, so make all strings
            # double-quoted.
            if tok.startswith("'"):
                guts = re.sub(r"\\.|.", escape_quotes, tok[1:-1])
                tok = '"' + guts + '"'
        elif name == 'id':
            # C can't deal with Unicode escapes in identifiers. We don't
            # need them for gettext anyway, so replace them with something
            # innocuous
            tok = tok.replace("\\", "U")
        pieces.append(tok)
    return ''.join(pieces)
|
||||
230
venv/lib/python3.8/site-packages/django/utils/log.py
Normal file
230
venv/lib/python3.8/site-packages/django/utils/log.py
Normal file
@@ -0,0 +1,230 @@
|
||||
import logging
|
||||
import logging.config # needed when logging_config doesn't start with logging.config
|
||||
from copy import copy
|
||||
|
||||
from django.conf import settings
|
||||
from django.core import mail
|
||||
from django.core.mail import get_connection
|
||||
from django.core.management.color import color_style
|
||||
from django.utils.module_loading import import_string
|
||||
|
||||
# Logger used by log_response() for request/response problems.
request_logger = logging.getLogger('django.request')

# Default logging for Django. This sends an email to the site admins on every
# HTTP 500 error. Depending on DEBUG, all other log records are either sent to
# the console (DEBUG=True) or discarded (DEBUG=False) by means of the
# require_debug_true filter.
# The '()' keys tell logging.config to instantiate the named custom class.
DEFAULT_LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'filters': {
        'require_debug_false': {
            '()': 'django.utils.log.RequireDebugFalse',
        },
        'require_debug_true': {
            '()': 'django.utils.log.RequireDebugTrue',
        },
    },
    'formatters': {
        'django.server': {
            '()': 'django.utils.log.ServerFormatter',
            'format': '[{server_time}] {message}',
            'style': '{',
        }
    },
    'handlers': {
        'console': {
            'level': 'INFO',
            'filters': ['require_debug_true'],
            'class': 'logging.StreamHandler',
        },
        'django.server': {
            'level': 'INFO',
            'class': 'logging.StreamHandler',
            'formatter': 'django.server',
        },
        'mail_admins': {
            'level': 'ERROR',
            'filters': ['require_debug_false'],
            'class': 'django.utils.log.AdminEmailHandler'
        }
    },
    'loggers': {
        'django': {
            'handlers': ['console', 'mail_admins'],
            'level': 'INFO',
        },
        # runserver output gets its own non-propagating logger so it is
        # formatted by ServerFormatter only.
        'django.server': {
            'handlers': ['django.server'],
            'level': 'INFO',
            'propagate': False,
        },
    }
}
|
||||
|
||||
|
||||
def configure_logging(logging_config, logging_settings):
    """
    Set up logging: always apply Django's DEFAULT_LOGGING first, then hand
    the user-supplied LOGGING dict (`logging_settings`) to the callable named
    by the LOGGING_CONFIG dotted path (`logging_config`).
    """
    if logging_config:
        # First find the logging configuration function ...
        logging_config_func = import_string(logging_config)

        logging.config.dictConfig(DEFAULT_LOGGING)

        # ... then invoke it with the logging settings
        if logging_settings:
            logging_config_func(logging_settings)
|
||||
|
||||
|
||||
class AdminEmailHandler(logging.Handler):
    """An exception log handler that emails log entries to site admins.

    If the request is passed as the first argument to the log record,
    request data will be provided in the email report.
    """

    def __init__(self, include_html=False, email_backend=None, reporter_class=None):
        super().__init__()
        # include_html: attach the full HTML debug page to the email.
        self.include_html = include_html
        # email_backend: dotted path of an alternative email backend.
        self.email_backend = email_backend
        # reporter_class: dotted path of the traceback reporter to use.
        self.reporter_class = import_string(reporter_class or 'django.views.debug.ExceptionReporter')

    def emit(self, record):
        # Build the subject line; fall back to a request-less subject if the
        # record carries no request (or anything in here blows up).
        try:
            request = record.request
            subject = '%s (%s IP): %s' % (
                record.levelname,
                ('internal' if request.META.get('REMOTE_ADDR') in settings.INTERNAL_IPS
                 else 'EXTERNAL'),
                record.getMessage()
            )
        except Exception:
            subject = '%s: %s' % (
                record.levelname,
                record.getMessage()
            )
            request = None
        subject = self.format_subject(subject)

        # Since we add a nicely formatted traceback on our own, create a copy
        # of the log record without the exception data.
        no_exc_record = copy(record)
        no_exc_record.exc_info = None
        no_exc_record.exc_text = None

        if record.exc_info:
            exc_info = record.exc_info
        else:
            # No real exception: synthesize a (type, value, tb) triple so the
            # reporter still has a message to render.
            exc_info = (None, record.getMessage(), None)

        reporter = self.reporter_class(request, is_email=True, *exc_info)
        message = "%s\n\n%s" % (self.format(no_exc_record), reporter.get_traceback_text())
        html_message = reporter.get_traceback_html() if self.include_html else None
        # fail_silently=True: a broken mail backend must never take down the
        # logging path.
        self.send_mail(subject, message, fail_silently=True, html_message=html_message)

    def send_mail(self, subject, message, *args, **kwargs):
        # Thin wrapper so subclasses can override delivery.
        mail.mail_admins(subject, message, *args, connection=self.connection(), **kwargs)

    def connection(self):
        # Fresh connection per send, using the configured backend if any.
        return get_connection(backend=self.email_backend, fail_silently=True)

    def format_subject(self, subject):
        """
        Escape CR and LF characters.
        """
        return subject.replace('\n', '\\n').replace('\r', '\\r')
|
||||
|
||||
|
||||
class CallbackFilter(logging.Filter):
    """
    A logging filter that delegates the log/drop decision to a callable
    taking the record-to-be-logged as its only argument.
    """
    def __init__(self, callback):
        self.callback = callback

    def filter(self, record):
        # Preserve the historical int return values (1 = log, 0 = drop).
        return 1 if self.callback(record) else 0
|
||||
|
||||
|
||||
class RequireDebugFalse(logging.Filter):
    """Filter that passes records only when settings.DEBUG is False."""
    def filter(self, record):
        return not settings.DEBUG
|
||||
|
||||
|
||||
class RequireDebugTrue(logging.Filter):
    """Filter that passes records only when settings.DEBUG is True."""
    def filter(self, record):
        return settings.DEBUG
|
||||
|
||||
|
||||
class ServerFormatter(logging.Formatter):
    """Formatter for runserver output that colorizes messages by HTTP status."""

    def __init__(self, *args, **kwargs):
        self.style = color_style()
        super().__init__(*args, **kwargs)

    def _status_styler(self, status_code):
        # Map a status code to the colorizing callable. Exact codes
        # (304, 404) take precedence over their surrounding ranges.
        if 200 <= status_code < 300:
            # Put 2XX first, since it should be the common case
            return self.style.HTTP_SUCCESS
        if 100 <= status_code < 200:
            return self.style.HTTP_INFO
        if status_code == 304:
            return self.style.HTTP_NOT_MODIFIED
        if 300 <= status_code < 400:
            return self.style.HTTP_REDIRECT
        if status_code == 404:
            return self.style.HTTP_NOT_FOUND
        if 400 <= status_code < 500:
            return self.style.HTTP_BAD_REQUEST
        # Any 5XX, or any other status code
        return self.style.HTTP_SERVER_ERROR

    def format(self, record):
        msg = record.msg
        status_code = getattr(record, 'status_code', None)
        if status_code:
            msg = self._status_styler(status_code)(msg)

        # Lazily stamp the server time only when the format string wants it.
        if self.uses_server_time() and not hasattr(record, 'server_time'):
            record.server_time = self.formatTime(record, self.datefmt)

        record.msg = msg
        return super().format(record)

    def uses_server_time(self):
        return '{server_time}' in self._fmt
|
||||
|
||||
|
||||
def log_response(message, *args, response=None, request=None, logger=request_logger, level=None, exc_info=None):
    """
    Log errors based on HttpResponse status.

    Log 5xx responses as errors and 4xx responses as warnings (unless a level
    is given as a keyword argument). The HttpResponse status_code and the
    request are passed to the logger's extra parameter.
    """
    # A response can reach this function more than once (e.g. logged when an
    # exception is caught so exc_info is recorded, then again later); only
    # log it the first time.
    if getattr(response, '_has_been_logged', False):
        return

    if level is None:
        status = response.status_code
        if status >= 500:
            level = 'error'
        elif status >= 400:
            level = 'warning'
        else:
            level = 'info'

    log_method = getattr(logger, level)
    log_method(
        message, *args,
        extra={
            'status_code': response.status_code,
            'request': request,
        },
        exc_info=exc_info,
    )
    response._has_been_logged = True
|
||||
114
venv/lib/python3.8/site-packages/django/utils/lorem_ipsum.py
Normal file
114
venv/lib/python3.8/site-packages/django/utils/lorem_ipsum.py
Normal file
@@ -0,0 +1,114 @@
|
||||
"""
|
||||
Utility functions for generating "lorem ipsum" Latin text.
|
||||
"""
|
||||
|
||||
import random
|
||||
|
||||
# The canonical "lorem ipsum" paragraph, used verbatim as the first paragraph
# when paragraphs(..., common=True) is called.
COMMON_P = (
    'Lorem ipsum dolor sit amet, consectetur adipisicing elit, sed do eiusmod '
    'tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim '
    'veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea '
    'commodo consequat. Duis aute irure dolor in reprehenderit in voluptate '
    'velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint '
    'occaecat cupidatat non proident, sunt in culpa qui officia deserunt '
    'mollit anim id est laborum.'
)

# Pool of Latin words sampled (without replacement per batch) by sentence()
# and words().
WORDS = (
    'exercitationem', 'perferendis', 'perspiciatis', 'laborum', 'eveniet',
    'sunt', 'iure', 'nam', 'nobis', 'eum', 'cum', 'officiis', 'excepturi',
    'odio', 'consectetur', 'quasi', 'aut', 'quisquam', 'vel', 'eligendi',
    'itaque', 'non', 'odit', 'tempore', 'quaerat', 'dignissimos',
    'facilis', 'neque', 'nihil', 'expedita', 'vitae', 'vero', 'ipsum',
    'nisi', 'animi', 'cumque', 'pariatur', 'velit', 'modi', 'natus',
    'iusto', 'eaque', 'sequi', 'illo', 'sed', 'ex', 'et', 'voluptatibus',
    'tempora', 'veritatis', 'ratione', 'assumenda', 'incidunt', 'nostrum',
    'placeat', 'aliquid', 'fuga', 'provident', 'praesentium', 'rem',
    'necessitatibus', 'suscipit', 'adipisci', 'quidem', 'possimus',
    'voluptas', 'debitis', 'sint', 'accusantium', 'unde', 'sapiente',
    'voluptate', 'qui', 'aspernatur', 'laudantium', 'soluta', 'amet',
    'quo', 'aliquam', 'saepe', 'culpa', 'libero', 'ipsa', 'dicta',
    'reiciendis', 'nesciunt', 'doloribus', 'autem', 'impedit', 'minima',
    'maiores', 'repudiandae', 'ipsam', 'obcaecati', 'ullam', 'enim',
    'totam', 'delectus', 'ducimus', 'quis', 'voluptates', 'dolores',
    'molestiae', 'harum', 'dolorem', 'quia', 'voluptatem', 'molestias',
    'magni', 'distinctio', 'omnis', 'illum', 'dolorum', 'voluptatum', 'ea',
    'quas', 'quam', 'corporis', 'quae', 'blanditiis', 'atque', 'deserunt',
    'laboriosam', 'earum', 'consequuntur', 'hic', 'cupiditate',
    'quibusdam', 'accusamus', 'ut', 'rerum', 'error', 'minus', 'eius',
    'ab', 'ad', 'nemo', 'fugit', 'officia', 'at', 'in', 'id', 'quos',
    'reprehenderit', 'numquam', 'iste', 'fugiat', 'sit', 'inventore',
    'beatae', 'repellendus', 'magnam', 'recusandae', 'quod', 'explicabo',
    'doloremque', 'aperiam', 'consequatur', 'asperiores', 'commodi',
    'optio', 'dolor', 'labore', 'temporibus', 'repellat', 'veniam',
    'architecto', 'est', 'esse', 'mollitia', 'nulla', 'a', 'similique',
    'eos', 'alias', 'dolore', 'tenetur', 'deleniti', 'porro', 'facere',
    'maxime', 'corrupti',
)

# The 19 opening words of the canonical paragraph; used as the fixed prefix
# by words(..., common=True).
COMMON_WORDS = (
    'lorem', 'ipsum', 'dolor', 'sit', 'amet', 'consectetur',
    'adipisicing', 'elit', 'sed', 'do', 'eiusmod', 'tempor', 'incididunt',
    'ut', 'labore', 'et', 'dolore', 'magna', 'aliqua',
)
|
||||
|
||||
|
||||
def sentence():
    """
    Return a randomly generated sentence of lorem ipsum text.

    The first word is capitalized, and the sentence ends in either a period or
    question mark. Commas are added at random.
    """
    # Pick how many comma-separated sections the sentence has, then fill
    # each one with a random handful of words.
    section_count = random.randint(1, 5)
    sections = []
    for _ in range(section_count):
        sections.append(' '.join(random.sample(WORDS, random.randint(3, 12))))
    body = ', '.join(sections)
    # Sentence-case the result and add end punctuation.
    return '{}{}{}'.format(body[0].upper(), body[1:], random.choice('?.'))
|
||||
|
||||
|
||||
def paragraph():
    """
    Return a randomly generated paragraph of lorem ipsum text.

    The paragraph consists of between 1 and 4 sentences, inclusive.
    """
    sentence_count = random.randint(1, 4)
    return ' '.join(sentence() for _ in range(sentence_count))
|
||||
|
||||
|
||||
def paragraphs(count, common=True):
    """
    Return a list of paragraphs as returned by paragraph().

    If `common` is True, then the first paragraph will be the standard
    'lorem ipsum' paragraph. Otherwise, the first paragraph will be random
    Latin text. Either way, subsequent paragraphs will be random Latin text.
    """
    result = []
    for index in range(count):
        # Only the very first paragraph may use the canonical text.
        result.append(COMMON_P if common and index == 0 else paragraph())
    return result
|
||||
|
||||
|
||||
def words(count, common=True):
    """
    Return a string of `count` lorem ipsum words separated by a single space.

    If `common` is True, then the first 19 words will be the standard
    'lorem ipsum' words. Otherwise, all words will be selected randomly.
    """
    result = list(COMMON_WORDS) if common else []
    if count <= len(result):
        # The fixed prefix already covers the request; trim it to size.
        result = result[:count]
    else:
        remaining = count - len(result)
        # random.sample draws without replacement, so pull from WORDS in
        # batches no larger than the pool.
        while remaining > 0:
            batch = min(remaining, len(WORDS))
            remaining -= batch
            result += random.sample(WORDS, batch)
    return ' '.join(result)
|
||||
@@ -0,0 +1,97 @@
|
||||
import copy
|
||||
import os
|
||||
from importlib import import_module
|
||||
from importlib.util import find_spec as importlib_find
|
||||
|
||||
|
||||
def import_string(dotted_path):
    """
    Import a dotted module path and return the attribute/class designated by the
    last name in the path. Raise ImportError if the import failed.
    """
    try:
        module_path, attr_name = dotted_path.rsplit('.', 1)
    except ValueError as err:
        # No dot at all: the path can't name a "module.attribute" pair.
        raise ImportError("%s doesn't look like a module path" % dotted_path) from err

    module = import_module(module_path)
    try:
        return getattr(module, attr_name)
    except AttributeError as err:
        raise ImportError('Module "%s" does not define a "%s" attribute/class' % (
            module_path, attr_name)
        ) from err
|
||||
|
||||
|
||||
def autodiscover_modules(*args, **kwargs):
    """
    Auto-discover INSTALLED_APPS modules and fail silently when
    not present. This forces an import on them to register any admin bits they
    may want.

    You may provide a register_to keyword parameter as a way to access a
    registry. This register_to object must have a _registry instance variable
    to access it.
    """
    # Imported here, not at module level, to avoid a circular import through
    # django.apps.
    from django.apps import apps

    register_to = kwargs.get('register_to')
    for app_config in apps.get_app_configs():
        for module_to_search in args:
            # Attempt to import the app's module.
            try:
                if register_to:
                    # Snapshot the registry so a failed import can be rolled
                    # back cleanly below.
                    before_import_registry = copy.copy(register_to._registry)

                import_module('%s.%s' % (app_config.name, module_to_search))
            except Exception:
                # Reset the registry to the state before the last import
                # as this import will have to reoccur on the next request and
                # this could raise NotRegistered and AlreadyRegistered
                # exceptions (see #8245).
                if register_to:
                    register_to._registry = before_import_registry

                # Decide whether to bubble up this error. If the app just
                # doesn't have the module in question, we can ignore the error
                # attempting to import it, otherwise we want it to bubble up.
                if module_has_submodule(app_config.module, module_to_search):
                    raise
|
||||
|
||||
|
||||
def module_has_submodule(package, module_name):
    """Return whether 'package' is a package that contains 'module_name'."""
    try:
        parent_name = package.__name__
        search_path = package.__path__
    except AttributeError:
        # Only real packages carry __path__; a plain module has no submodules.
        return False

    try:
        spec = importlib_find('%s.%s' % (parent_name, module_name), search_path)
    except (ModuleNotFoundError, AttributeError):
        # When module_name is an invalid dotted path, Python raises
        # ModuleNotFoundError. AttributeError is raised on PY36 (fixed in
        # PY37) if the penultimate part of the path is not a package.
        return False
    return spec is not None
|
||||
|
||||
|
||||
def module_dir(module):
    """
    Return the name of the directory containing `module`.

    Raise ValueError when it can't be determined, e.g. for namespace packages
    that are split over several directories.
    """
    # list() because a namespace package's _NamespacePath can't be indexed.
    search_paths = list(getattr(module, '__path__', []))
    if len(search_paths) == 1:
        return search_paths[0]
    source_file = getattr(module, '__file__', None)
    if source_file is not None:
        return os.path.dirname(source_file)
    raise ValueError("Cannot determine directory containing %s" % module)
|
||||
@@ -0,0 +1,87 @@
|
||||
from decimal import Decimal
|
||||
|
||||
from django.conf import settings
|
||||
from django.utils.safestring import mark_safe
|
||||
|
||||
|
||||
def format(number, decimal_sep, decimal_pos=None, grouping=0, thousand_sep='',
           force_grouping=False, use_l10n=None):
    """
    Get a number (as a number or string), and return it as a string,
    using formats defined as arguments:

    * decimal_sep: Decimal separator symbol (for example ".")
    * decimal_pos: Number of decimal positions
    * grouping: Number of digits in every group limited by thousand separator.
        For non-uniform digit grouping, it can be a sequence with the number
        of digit group sizes following the format used by the Python locale
        module in locale.localeconv() LC_NUMERIC grouping (e.g. (3, 2, 0)).
    * thousand_sep: Thousand separator symbol (for example ",")
    """
    # Grouping applies when l10n is active (explicitly or via settings) AND
    # USE_THOUSAND_SEPARATOR is on, or when the caller forces it — and only
    # if a nonzero grouping size was given.
    use_grouping = (use_l10n or (use_l10n is None and settings.USE_L10N)) and settings.USE_THOUSAND_SEPARATOR
    use_grouping = use_grouping or force_grouping
    use_grouping = use_grouping and grouping != 0
    # Make the common case fast
    if isinstance(number, int) and not use_grouping and not decimal_pos:
        return mark_safe(number)
    # sign
    sign = ''
    if isinstance(number, Decimal):

        if decimal_pos is not None:
            # If the provided number is too small to affect any of the visible
            # decimal places, consider it equal to '0'.
            cutoff = Decimal('0.' + '1'.rjust(decimal_pos, '0'))
            if abs(number) < cutoff:
                number = Decimal('0')

        # Format values with more than 200 digits (an arbitrary cutoff) using
        # scientific notation to avoid high memory usage in {:f}'.format().
        _, digits, exponent = number.as_tuple()
        if abs(exponent) + len(digits) > 200:
            number = '{:e}'.format(number)
            coefficient, exponent = number.split('e')
            # Format the coefficient (recursively) and reattach the exponent.
            coefficient = format(
                coefficient, decimal_sep, decimal_pos, grouping,
                thousand_sep, force_grouping, use_l10n,
            )
            return '{}e{}'.format(coefficient, exponent)
        else:
            str_number = '{:f}'.format(number)
    else:
        str_number = str(number)
    # Strip the sign off so grouping only runs over digits.
    if str_number[0] == '-':
        sign = '-'
        str_number = str_number[1:]
    # decimal part
    if '.' in str_number:
        int_part, dec_part = str_number.split('.')
        if decimal_pos is not None:
            # Truncate (not round) to the requested number of places.
            dec_part = dec_part[:decimal_pos]
    else:
        int_part, dec_part = str_number, ''
    if decimal_pos is not None:
        # Zero-pad to exactly decimal_pos places.
        dec_part = dec_part + ('0' * (decimal_pos - len(dec_part)))
    dec_part = dec_part and decimal_sep + dec_part
    # grouping
    if use_grouping:
        try:
            # if grouping is a sequence
            intervals = list(grouping)
        except TypeError:
            # grouping is a single value
            intervals = [grouping, 0]
        active_interval = intervals.pop(0)
        int_part_gd = ''
        cnt = 0
        # Walk the integer part right-to-left, inserting the (reversed)
        # thousand separator each time the active group size is filled.
        for digit in int_part[::-1]:
            if cnt and cnt == active_interval:
                if intervals:
                    # A 0 in the sequence means "repeat the previous size".
                    active_interval = intervals.pop(0) or active_interval
                int_part_gd += thousand_sep[::-1]
                cnt = 0
            int_part_gd += digit
            cnt += 1
        int_part = int_part_gd[::-1]
    return sign + int_part + dec_part
|
||||
333
venv/lib/python3.8/site-packages/django/utils/regex_helper.py
Normal file
333
venv/lib/python3.8/site-packages/django/utils/regex_helper.py
Normal file
@@ -0,0 +1,333 @@
|
||||
"""
|
||||
Functions for reversing a regular expression (used in reverse URL resolving).
|
||||
Used internally by Django and not intended for external use.
|
||||
|
||||
This is not, and is not intended to be, a complete reg-exp decompiler. It
|
||||
should be good enough for a large class of URLS, however.
|
||||
"""
|
||||
# Mapping of an escape character to a representative of that class. So, e.g.,
# "\w" is replaced by "x" in a reverse URL. A value of None means to ignore
# this sequence. Any missing key is mapped to itself.
# Keys are the character following the backslash; anchors and boundary
# assertions (\A, \b, \B, \Z) contribute no text and are dropped.
ESCAPE_MAPPINGS = {
    "A": None,
    "b": None,
    "B": None,
    "d": "0",
    "D": "x",
    "s": " ",
    "S": "x",
    "w": "x",
    "W": "!",
    "Z": None,
}
|
||||
|
||||
|
||||
class Choice(list):
    """
    Represent multiple possibilities at this point in a pattern string.

    A list of alternatives; a None entry marks the "zero occurrences" option
    for an optional group.
    """
|
||||
|
||||
|
||||
class Group(list):
    """
    Represent a capturing group in the pattern string.

    Built from a (format-string fragment, parameter-name) pair; the name is
    None for named backreferences.
    """
|
||||
|
||||
|
||||
class NonCapture(list):
    """
    Represent a non-capturing group in the pattern string.

    Wraps the regrouped contents of a '(?:...)' group so a following
    quantifier can apply to it as a unit.
    """
|
||||
|
||||
|
||||
def normalize(pattern):
    r"""
    Given a reg-exp pattern, normalize it to an iterable of forms that
    suffice for reverse matching. This does the following:

    (1) For any repeating sections, keeps the minimum number of occurrences
        permitted (this means zero for optional groups).
    (2) If an optional group includes parameters, include one occurrence of
        that group (along with the zero occurrence case from step (1)).
    (3) Select the first (essentially an arbitrary) element from any character
        class. Select an arbitrary character for any unordered class (e.g. '.'
        or '\w') in the pattern.
    (4) Ignore look-ahead and look-behind assertions.
    (5) Raise an error on any disjunctive ('|') constructs.

    Django's URLs for forward resolving are either all positional arguments or
    all keyword arguments. That is assumed here, as well. Although reverse
    resolving can be done using positional args when keyword args are
    specified, the two cannot be mixed in the same reverse() call.
    """
    # Do a linear scan to work out the special features of this pattern. The
    # idea is that we scan once here and collect all the information we need to
    # make future decisions.
    result = []
    non_capturing_groups = []
    consume_next = True
    pattern_iter = next_char(iter(pattern))
    num_args = 0

    # A "while" loop is used here because later on we need to be able to peek
    # at the next character and possibly go around without consuming another
    # one at the top of the loop.
    try:
        ch, escaped = next(pattern_iter)
    except StopIteration:
        # Empty pattern: one form with no text and no parameters.
        return [('', [])]

    try:
        while True:
            if escaped:
                result.append(ch)
            elif ch == '.':
                # Replace "any character" with an arbitrary representative.
                result.append(".")
            elif ch == '|':
                # FIXME: One day we should do this, but not in 1.0.
                raise NotImplementedError('Awaiting Implementation')
            elif ch == "^":
                # Start anchor produces no reverse text; ignore it.
                pass
            elif ch == '$':
                # End anchor: nothing meaningful can follow; stop scanning.
                break
            elif ch == ')':
                # This can only be the end of a non-capturing group, since all
                # other unescaped parentheses are handled by the grouping
                # section later (and the full group is handled there).
                #
                # We regroup everything inside the capturing group so that it
                # can be quantified, if necessary.
                start = non_capturing_groups.pop()
                inner = NonCapture(result[start:])
                result = result[:start] + [inner]
            elif ch == '[':
                # Replace ranges with the first character in the range.
                ch, escaped = next(pattern_iter)
                result.append(ch)
                ch, escaped = next(pattern_iter)
                # Skip the rest of the class (an escaped ']' doesn't end it).
                while escaped or ch != ']':
                    ch, escaped = next(pattern_iter)
            elif ch == '(':
                # Some kind of group.
                ch, escaped = next(pattern_iter)
                if ch != '?' or escaped:
                    # A positional group
                    name = "_%d" % num_args
                    num_args += 1
                    result.append(Group((("%%(%s)s" % name), name)))
                    walk_to_end(ch, pattern_iter)
                else:
                    ch, escaped = next(pattern_iter)
                    if ch in '!=<':
                        # All of these are ignorable. Walk to the end of the
                        # group.
                        walk_to_end(ch, pattern_iter)
                    elif ch == ':':
                        # Non-capturing group
                        non_capturing_groups.append(len(result))
                    elif ch != 'P':
                        # Anything else, other than a named group, is something
                        # we cannot reverse.
                        raise ValueError("Non-reversible reg-exp portion: '(?%s'" % ch)
                    else:
                        ch, escaped = next(pattern_iter)
                        if ch not in ('<', '='):
                            raise ValueError("Non-reversible reg-exp portion: '(?P%s'" % ch)
                        # We are in a named capturing group. Extract the name
                        # and then skip to the end.
                        if ch == '<':
                            terminal_char = '>'
                        # We are in a named backreference.
                        else:
                            terminal_char = ')'
                        name = []
                        ch, escaped = next(pattern_iter)
                        while ch != terminal_char:
                            name.append(ch)
                            ch, escaped = next(pattern_iter)
                        param = ''.join(name)
                        # Named backreferences have already consumed the
                        # parenthesis.
                        if terminal_char != ')':
                            result.append(Group((("%%(%s)s" % param), param)))
                            walk_to_end(ch, pattern_iter)
                        else:
                            result.append(Group((("%%(%s)s" % param), None)))
            elif ch in "*?+{":
                # Quantifiers affect the previous item in the result list.
                count, ch = get_quantifier(ch, pattern_iter)
                if ch:
                    # We had to look ahead, but it wasn't needed to compute
                    # the quantifier, so use this character next time around
                    # the main loop.
                    consume_next = False

                if count == 0:
                    if contains(result[-1], Group):
                        # If we are quantifying a capturing group (or
                        # something containing such a group) and the minimum is
                        # zero, we must also handle the case of one occurrence
                        # being present. All the quantifiers (except {0,0},
                        # which we conveniently ignore) that have a 0 minimum
                        # also allow a single occurrence.
                        result[-1] = Choice([None, result[-1]])
                    else:
                        result.pop()
                elif count > 1:
                    result.extend([result[-1]] * (count - 1))
            else:
                # Anything else is a literal.
                result.append(ch)

            if consume_next:
                ch, escaped = next(pattern_iter)
            consume_next = True
    except StopIteration:
        pass
    except NotImplementedError:
        # A case of using the disjunctive form. No results for you!
        return [('', [])]

    return list(zip(*flatten_result(result)))
|
||||
|
||||
|
||||
def next_char(input_iter):
    r"""
    Yield (char, escaped) pairs from "input_iter", resolving escape sequences.

    An escaped character is replaced by a representative of its class
    (e.g. \w -> "x"); escapes with no representative (mapped to None in
    ESCAPE_MAPPINGS) are skipped entirely. The boolean flag is True for
    characters that came from an escape sequence, False for raw characters.
    """
    for ch in input_iter:
        if ch == '\\':
            escaped_ch = next(input_iter)
            representative = ESCAPE_MAPPINGS.get(escaped_ch, escaped_ch)
            if representative is not None:
                yield representative, True
        else:
            yield ch, False
|
||||
|
||||
|
||||
def walk_to_end(ch, input_iter):
    """
    Consume items from input_iter until the current capturing group closes.

    The iterator is positioned inside a capturing group; nested groups are
    skipped over and escaped parentheses are ignored. "ch" is the character
    that opened the current position (a '(' raises the initial nesting level).
    """
    nesting = 1 if ch == '(' else 0
    for char, escaped in input_iter:
        if escaped:
            # Escaped parentheses don't affect grouping.
            continue
        if char == '(':
            nesting += 1
        elif char == ')':
            if not nesting:
                return
            nesting -= 1
|
||||
|
||||
|
||||
def get_quantifier(ch, input_iter):
    """
    Parse a regex quantifier whose first character is "ch".

    Return a pair (minimum occurrences, lookahead): the lookahead is None, or
    the next character from input_iter if it had to be read but is not part
    of the quantifier.
    """
    if ch in '*?+':
        try:
            ch2, escaped = next(input_iter)
        except StopIteration:
            ch2 = None
        # A trailing '?' only marks the quantifier non-greedy; swallow it.
        if ch2 == '?':
            ch2 = None
        return (1 if ch == '+' else 0), ch2

    # '{m}' / '{m,n}' form: collect everything up to the closing brace.
    quant = []
    while ch != '}':
        ch, escaped = next(input_iter)
        quant.append(ch)
    values = ''.join(quant[:-1]).split(',')

    # Consume the trailing '?' (non-greedy marker), if present.
    try:
        ch, escaped = next(input_iter)
    except StopIteration:
        ch = None
    if ch == '?':
        ch = None
    return int(values[0]), ch
|
||||
|
||||
|
||||
def contains(source, inst):
    """
    Return True if "source" is, or (for NonCapture containers) recursively
    contains, an instance of "inst". False otherwise.
    """
    if isinstance(source, inst):
        return True
    if isinstance(source, NonCapture):
        return any(contains(element, inst) for element in source)
    return False
|
||||
|
||||
|
||||
def flatten_result(source):
    """
    Turn the given source sequence into a list of reg-exp possibilities and
    their arguments. Return a list of strings and a list of argument lists.
    Each of the two lists will be of the same length.
    """
    if source is None:
        return [''], [[]]
    if isinstance(source, Group):
        # A single group contributes its pattern text plus, if it is a named
        # group, its parameter name.
        if source[1] is None:
            params = []
        else:
            params = [source[1]]
        return [source[0]], [params]
    result = ['']
    result_args = [[]]
    pos = last = 0
    for pos, elt in enumerate(source):
        if isinstance(elt, str):
            # Plain characters are accumulated in runs and joined below.
            continue
        # Literal run since the previous special element.
        piece = ''.join(source[last:pos])
        if isinstance(elt, Group):
            piece += elt[0]
            param = elt[1]
        else:
            param = None
        last = pos + 1
        # Append the literal run (and any group text/param) to every
        # alternative accumulated so far.
        for i in range(len(result)):
            result[i] += piece
            if param:
                result_args[i].append(param)
        if isinstance(elt, (Choice, NonCapture)):
            if isinstance(elt, NonCapture):
                elt = [elt]
            # Flatten each alternative of the choice recursively...
            inner_result, inner_args = [], []
            for item in elt:
                res, args = flatten_result(item)
                inner_result.extend(res)
                inner_args.extend(args)
            # ...then take the cross product with the existing alternatives.
            new_result = []
            new_args = []
            for item, args in zip(result, result_args):
                for i_item, i_args in zip(inner_result, inner_args):
                    new_result.append(item + i_item)
                    new_args.append(args[:] + i_args)
            result = new_result
            result_args = new_args
    if pos >= last:
        # Append any trailing literal characters after the last special
        # element.
        piece = ''.join(source[last:])
        for i in range(len(result)):
            result[i] += piece
    return result, result_args
|
||||
63
venv/lib/python3.8/site-packages/django/utils/safestring.py
Normal file
63
venv/lib/python3.8/site-packages/django/utils/safestring.py
Normal file
@@ -0,0 +1,63 @@
|
||||
"""
|
||||
Functions for working with "safe strings": strings that can be displayed safely
|
||||
without further escaping in HTML. Marking something as a "safe string" means
|
||||
that the producer of the string has already turned characters that should not
|
||||
be interpreted by the HTML engine (e.g. '<') into the appropriate entities.
|
||||
"""
|
||||
|
||||
from django.utils.functional import wraps
|
||||
|
||||
|
||||
class SafeData:
    """Mixin marking a value as already safe for HTML output."""

    def __html__(self):
        """
        Return the HTML representation of the string for interoperability.

        Other template engines use this hook to recognize Django's SafeData.
        """
        return self
|
||||
|
||||
|
||||
class SafeString(str, SafeData):
    """
    A str subclass that has been explicitly marked as "safe" for HTML output
    purposes.
    """

    def __add__(self, rhs):
        """
        Concatenating two safe strings stays safe; concatenating with
        anything else drops the safety marker.
        """
        combined = super().__add__(rhs)
        if not isinstance(rhs, SafeData):
            return combined
        return SafeString(combined)

    def __str__(self):
        return self


SafeText = SafeString  # For backwards compatibility since Django 2.0.
|
||||
|
||||
|
||||
def _safety_decorator(safety_marker, func):
|
||||
@wraps(func)
|
||||
def wrapped(*args, **kwargs):
|
||||
return safety_marker(func(*args, **kwargs))
|
||||
return wrapped
|
||||
|
||||
|
||||
def mark_safe(s):
    """
    Explicitly mark a string as safe for (HTML) output purposes. The returned
    object can be used everywhere a string is appropriate.

    If used on a method as a decorator, mark the returned data as safe.

    Can be called multiple times on a single string (idempotent: values that
    already expose __html__ are returned unchanged).
    """
    if hasattr(s, '__html__'):
        return s
    return _safety_decorator(mark_safe, s) if callable(s) else SafeString(s)
|
||||
215
venv/lib/python3.8/site-packages/django/utils/termcolors.py
Normal file
215
venv/lib/python3.8/site-packages/django/utils/termcolors.py
Normal file
@@ -0,0 +1,215 @@
|
||||
"""
|
||||
termcolors.py
|
||||
"""
|
||||
|
||||
# The eight standard ANSI colors, in SGR code order.
color_names = ('black', 'red', 'green', 'yellow', 'blue', 'magenta', 'cyan', 'white')
# SGR codes: foreground colors are 30-37, background colors are 40-47,
# in the same order as color_names.
foreground = {color_names[x]: '3%s' % x for x in range(8)}
background = {color_names[x]: '4%s' % x for x in range(8)}

# SGR code that clears all colors and attributes.
RESET = '0'
# SGR codes for text display attributes.
opt_dict = {'bold': '1', 'underscore': '4', 'blink': '5', 'reverse': '7', 'conceal': '8'}
|
||||
|
||||
|
||||
def colorize(text='', opts=(), **kwargs):
    """
    Return your text, enclosed in ANSI graphics codes.

    Depends on the keyword arguments 'fg' and 'bg', and the contents of
    the opts tuple/list.

    Return the RESET code if no parameters are given.

    Valid colors:
        'black', 'red', 'green', 'yellow', 'blue', 'magenta', 'cyan', 'white'

    Valid options:
        'bold'
        'underscore'
        'blink'
        'reverse'
        'conceal'
        'noreset' - string will not be auto-terminated with the RESET code

    Examples:
        colorize('hello', fg='red', bg='blue', opts=('blink',))
        colorize()
        colorize('goodbye', opts=('underscore',))
        print(colorize('first line', fg='red', opts=('noreset',)))
        print('this should be red too')
        print(colorize('and so should this'))
        print('this should not be red')
    """
    if text == '' and len(opts) == 1 and opts[0] == 'reset':
        return '\x1b[%sm' % RESET
    codes = []
    for key, value in kwargs.items():
        if key == 'fg':
            codes.append(foreground[value])
        elif key == 'bg':
            codes.append(background[value])
    codes.extend(opt_dict[opt] for opt in opts if opt in opt_dict)
    if 'noreset' not in opts:
        # Auto-terminate so the styling doesn't leak past this string.
        text = '%s\x1b[%sm' % (text or '', RESET)
    return '\x1b[%sm%s' % (';'.join(codes), text or '')
|
||||
|
||||
|
||||
def make_style(opts=(), **kwargs):
    """
    Return a function with default parameters for colorize()

    Example:
        bold_red = make_style(opts=('bold',), fg='red')
        print(bold_red('hello'))
        KEYWORD = make_style(fg='yellow')
        COMMENT = make_style(fg='blue', opts=('bold',))
    """
    def style_func(text):
        return colorize(text, opts, **kwargs)
    return style_func
|
||||
|
||||
|
||||
# Palette names accepted in the DJANGO_COLORS environment variable.
NOCOLOR_PALETTE = 'nocolor'
DARK_PALETTE = 'dark'
LIGHT_PALETTE = 'light'

# Maps each palette name to {role name: colorize() kwargs}. The nocolor
# palette lists every known role (with empty definitions), so it also serves
# as the registry of valid role names for parse_color_setting().
PALETTES = {
    NOCOLOR_PALETTE: {
        'ERROR': {},
        'SUCCESS': {},
        'WARNING': {},
        'NOTICE': {},
        'SQL_FIELD': {},
        'SQL_COLTYPE': {},
        'SQL_KEYWORD': {},
        'SQL_TABLE': {},
        'HTTP_INFO': {},
        'HTTP_SUCCESS': {},
        'HTTP_REDIRECT': {},
        'HTTP_NOT_MODIFIED': {},
        'HTTP_BAD_REQUEST': {},
        'HTTP_NOT_FOUND': {},
        'HTTP_SERVER_ERROR': {},
        'MIGRATE_HEADING': {},
        'MIGRATE_LABEL': {},
    },
    DARK_PALETTE: {
        'ERROR': {'fg': 'red', 'opts': ('bold',)},
        'SUCCESS': {'fg': 'green', 'opts': ('bold',)},
        'WARNING': {'fg': 'yellow', 'opts': ('bold',)},
        'NOTICE': {'fg': 'red'},
        'SQL_FIELD': {'fg': 'green', 'opts': ('bold',)},
        'SQL_COLTYPE': {'fg': 'green'},
        'SQL_KEYWORD': {'fg': 'yellow'},
        'SQL_TABLE': {'opts': ('bold',)},
        'HTTP_INFO': {'opts': ('bold',)},
        'HTTP_SUCCESS': {},
        'HTTP_REDIRECT': {'fg': 'green'},
        'HTTP_NOT_MODIFIED': {'fg': 'cyan'},
        'HTTP_BAD_REQUEST': {'fg': 'red', 'opts': ('bold',)},
        'HTTP_NOT_FOUND': {'fg': 'yellow'},
        'HTTP_SERVER_ERROR': {'fg': 'magenta', 'opts': ('bold',)},
        'MIGRATE_HEADING': {'fg': 'cyan', 'opts': ('bold',)},
        'MIGRATE_LABEL': {'opts': ('bold',)},
    },
    LIGHT_PALETTE: {
        'ERROR': {'fg': 'red', 'opts': ('bold',)},
        'SUCCESS': {'fg': 'green', 'opts': ('bold',)},
        'WARNING': {'fg': 'yellow', 'opts': ('bold',)},
        'NOTICE': {'fg': 'red'},
        'SQL_FIELD': {'fg': 'green', 'opts': ('bold',)},
        'SQL_COLTYPE': {'fg': 'green'},
        'SQL_KEYWORD': {'fg': 'blue'},
        'SQL_TABLE': {'opts': ('bold',)},
        'HTTP_INFO': {'opts': ('bold',)},
        'HTTP_SUCCESS': {},
        'HTTP_REDIRECT': {'fg': 'green', 'opts': ('bold',)},
        'HTTP_NOT_MODIFIED': {'fg': 'green'},
        'HTTP_BAD_REQUEST': {'fg': 'red', 'opts': ('bold',)},
        'HTTP_NOT_FOUND': {'fg': 'red'},
        'HTTP_SERVER_ERROR': {'fg': 'magenta', 'opts': ('bold',)},
        'MIGRATE_HEADING': {'fg': 'cyan', 'opts': ('bold',)},
        'MIGRATE_LABEL': {'opts': ('bold',)},
    }
}
DEFAULT_PALETTE = DARK_PALETTE
|
||||
|
||||
|
||||
def parse_color_setting(config_string):
    """Parse a DJANGO_COLORS environment variable to produce the system palette

    The general form of a palette definition is:

        "palette;role=fg;role=fg/bg;role=fg,option,option;role=fg/bg,option,option"

    where:
        palette is a named palette; one of 'light', 'dark', or 'nocolor'.
        role is a named style used by Django
        fg is a foreground color.
        bg is a background color.
        option is a display option.

    Specifying a named palette is the same as manually specifying the individual
    definitions for each role. Any individual definitions following the palette
    definition will augment the base palette definition.

    Valid roles:
        'error', 'success', 'warning', 'notice', 'sql_field', 'sql_coltype',
        'sql_keyword', 'sql_table', 'http_info', 'http_success',
        'http_redirect', 'http_not_modified', 'http_bad_request',
        'http_not_found', 'http_server_error', 'migrate_heading',
        'migrate_label'

    Valid colors:
        'black', 'red', 'green', 'yellow', 'blue', 'magenta', 'cyan', 'white'

    Valid options:
        'bold', 'underscore', 'blink', 'reverse', 'conceal', 'noreset'
    """
    if not config_string:
        return PALETTES[DEFAULT_PALETTE]

    # Split the color configuration into parts
    parts = config_string.lower().split(';')
    palette = PALETTES[NOCOLOR_PALETTE].copy()
    for part in parts:
        if part in PALETTES:
            # A default palette has been specified
            palette.update(PALETTES[part])
        elif '=' in part:
            # Process a palette defining string
            definition = {}

            # Break the definition into the role,
            # plus the list of specific instructions.
            # The role must be in upper case
            role, instructions = part.split('=')
            role = role.upper()

            styles = instructions.split(',')
            styles.reverse()

            # The first instruction can contain a slash
            # to break apart fg/bg.
            colors = styles.pop().split('/')
            colors.reverse()
            fg = colors.pop()
            if fg in color_names:
                definition['fg'] = fg
            if colors and colors[-1] in color_names:
                definition['bg'] = colors[-1]

            # All remaining instructions are options
            opts = tuple(s for s in styles if s in opt_dict)
            if opts:
                definition['opts'] = opts

            # The nocolor palette has all available roles.
            # Use that palette as the basis for determining
            # if the role is valid.
            if role in PALETTES[NOCOLOR_PALETTE] and definition:
                palette[role] = definition

    # If there are no colors specified, return the empty palette.
    if palette == PALETTES[NOCOLOR_PALETTE]:
        return None
    return palette
|
||||
423
venv/lib/python3.8/site-packages/django/utils/text.py
Normal file
423
venv/lib/python3.8/site-packages/django/utils/text.py
Normal file
@@ -0,0 +1,423 @@
|
||||
import html.entities
|
||||
import re
|
||||
import unicodedata
|
||||
import warnings
|
||||
from gzip import GzipFile
|
||||
from io import BytesIO
|
||||
|
||||
from django.utils.deprecation import RemovedInDjango40Warning
|
||||
from django.utils.functional import SimpleLazyObject, keep_lazy_text, lazy
|
||||
from django.utils.translation import gettext as _, gettext_lazy, pgettext
|
||||
|
||||
|
||||
@keep_lazy_text
def capfirst(x):
    """Capitalize the first letter of a string (falsy values pass through)."""
    if not x:
        return x
    text = str(x)
    return text[0].upper() + text[1:]
|
||||
|
||||
|
||||
# Set up regular expressions
re_words = re.compile(r'<[^>]+?>|([^<>\s]+)', re.S)  # a tag, or a run of non-tag text (group 1)
re_chars = re.compile(r'<[^>]+?>|(.)', re.S)  # a tag, or a single non-tag character (group 1)
re_tag = re.compile(r'<(/)?(\S+?)(?:(\s*/)|\s.*?)?>', re.S)  # groups: closing slash, tag name, self-closing slash
re_newlines = re.compile(r'\r\n|\r')  # Used in normalize_newlines
re_camel_case = re.compile(r'(((?<=[a-z])[A-Z])|([A-Z](?![A-Z]|$)))')  # CamelCase word boundaries
|
||||
|
||||
|
||||
@keep_lazy_text
def wrap(text, width):
    """
    A word-wrap function that preserves existing line breaks. Expects that
    existing line breaks are posix newlines.

    Preserve all white space except added line breaks consume the space on
    which they break the line.

    Don't wrap long words, thus the output text may have lines longer than
    ``width``.
    """
    def _generator():
        for line in text.splitlines(True):  # True keeps trailing linebreaks
            # NOTE(review): the and/or expression yields width + 1 for lines
            # ending in '\n', but the outer min(..., width) clamps it back to
            # width — so max_width is effectively always width. Confirm
            # whether the +1 allowance was intended.
            max_width = min((line.endswith('\n') and width + 1 or width), width)
            while len(line) > max_width:
                # Break at the last space within the allowance (the broken
                # space itself is consumed by the inserted '\n').
                space = line[:max_width + 1].rfind(' ') + 1
                if space == 0:
                    # No space within the allowance: break at the first space
                    # anywhere (long words are not wrapped).
                    space = line.find(' ') + 1
                    if space == 0:
                        # No spaces at all: emit the line unbroken.
                        yield line
                        line = ''
                        break
                yield '%s\n' % line[:space - 1]
                line = line[space:]
                max_width = min((line.endswith('\n') and width + 1 or width), width)
            if line:
                yield line
    return ''.join(_generator())
|
||||
|
||||
|
||||
class Truncator(SimpleLazyObject):
    """
    An object used to truncate text, either by characters or words.

    The wrapped value is resolved lazily: str() is only applied to the given
    text the first time it is accessed.
    """
    def __init__(self, text):
        # Defer the str() conversion until the text is first needed.
        super().__init__(lambda: str(text))

    def add_truncation_text(self, text, truncate=None):
        # Attach the truncation marker to `text`. `truncate` may contain a
        # '%(truncated_text)s' placeholder; otherwise it is appended.
        # Defaults to a translatable ellipsis.
        if truncate is None:
            truncate = pgettext(
                'String to return when truncating text',
                '%(truncated_text)s…')
        if '%(truncated_text)s' in truncate:
            return truncate % {'truncated_text': text}
        # The truncation text didn't contain the %(truncated_text)s string
        # replacement argument so just append it to the text.
        if text.endswith(truncate):
            # But don't append the truncation text if the current text already
            # ends in this.
            return text
        return '%s%s' % (text, truncate)

    def chars(self, num, truncate=None, html=False):
        """
        Return the text truncated to be no longer than the specified number
        of characters.

        `truncate` specifies what should be used to notify that the string has
        been truncated, defaulting to a translatable string of an ellipsis.
        """
        self._setup()
        length = int(num)
        text = unicodedata.normalize('NFC', self._wrapped)

        # Calculate the length to truncate to (max length - end_text length),
        # ignoring combining characters so the budget counts visible glyphs.
        truncate_len = length
        for char in self.add_truncation_text('', truncate):
            if not unicodedata.combining(char):
                truncate_len -= 1
                if truncate_len == 0:
                    break
        if html:
            return self._truncate_html(length, truncate, text, truncate_len, False)
        return self._text_chars(length, truncate, text, truncate_len)

    def _text_chars(self, length, truncate, text, truncate_len):
        """Truncate a string after a certain number of chars."""
        s_len = 0
        end_index = None
        for i, char in enumerate(text):
            if unicodedata.combining(char):
                # Don't consider combining characters
                # as adding to the string length
                continue
            s_len += 1
            if end_index is None and s_len > truncate_len:
                end_index = i
            if s_len > length:
                # Return the truncated string
                return self.add_truncation_text(text[:end_index or 0],
                                                truncate)

        # Return the original string since no truncation was necessary
        return text

    def words(self, num, truncate=None, html=False):
        """
        Truncate a string after a certain number of words. `truncate` specifies
        what should be used to notify that the string has been truncated,
        defaulting to ellipsis.
        """
        self._setup()
        length = int(num)
        if html:
            return self._truncate_html(length, truncate, self._wrapped, length, True)
        return self._text_words(length, truncate)

    def _text_words(self, length, truncate):
        """
        Truncate a string after a certain number of words.

        Strip newlines in the string.
        """
        words = self._wrapped.split()
        if len(words) > length:
            words = words[:length]
            return self.add_truncation_text(' '.join(words), truncate)
        return ' '.join(words)

    def _truncate_html(self, length, truncate, text, truncate_len, words):
        """
        Truncate HTML to a certain number of chars (not counting tags and
        comments), or, if words is True, then to a certain number of words.
        Close opened tags if they were correctly closed in the given HTML.

        Preserve newlines in the HTML.
        """
        if words and length <= 0:
            return ''

        # Void elements (HTML4) that never take a closing tag.
        html4_singlets = (
            'br', 'col', 'link', 'base', 'img',
            'param', 'area', 'hr', 'input'
        )

        # Count non-HTML chars/words and keep note of open tags
        pos = 0
        end_text_pos = 0
        current_len = 0
        open_tags = []

        regex = re_words if words else re_chars

        while current_len <= length:
            m = regex.search(text, pos)
            if not m:
                # Checked through whole string
                break
            pos = m.end(0)
            if m.group(1):
                # It's an actual non-HTML word or char
                current_len += 1
                if current_len == truncate_len:
                    end_text_pos = pos
                continue
            # Check for tag
            tag = re_tag.match(m.group(0))
            if not tag or current_len >= truncate_len:
                # Don't worry about non tags or tags after our truncate point
                continue
            closing_tag, tagname, self_closing = tag.groups()
            # Element names are always case-insensitive
            tagname = tagname.lower()
            if self_closing or tagname in html4_singlets:
                pass
            elif closing_tag:
                # Check for match in open tags list
                try:
                    i = open_tags.index(tagname)
                except ValueError:
                    pass
                else:
                    # SGML: An end tag closes, back to the matching start tag,
                    # all unclosed intervening start tags with omitted end tags
                    open_tags = open_tags[i + 1:]
            else:
                # Add it to the start of the open tags list
                open_tags.insert(0, tagname)

        if current_len <= length:
            # Never hit the limit: no truncation needed.
            return text
        out = text[:end_text_pos]
        truncate_text = self.add_truncation_text('', truncate)
        if truncate_text:
            out += truncate_text
        # Close any tags still open
        for tag in open_tags:
            out += '</%s>' % tag
        # Return string
        return out
|
||||
|
||||
|
||||
@keep_lazy_text
def get_valid_filename(s):
    """
    Return the given string converted to a string that can be used for a clean
    filename: leading/trailing spaces are removed, other spaces become
    underscores, and anything that is not an alphanumeric, dash, underscore,
    or dot is dropped.

    >>> get_valid_filename("john's portrait in 2004.jpg")
    'johns_portrait_in_2004.jpg'
    """
    cleaned = str(s).strip().replace(' ', '_')
    return re.sub(r'(?u)[^-\w.]', '', cleaned)
|
||||
|
||||
|
||||
@keep_lazy_text
def get_text_list(list_, last_word=gettext_lazy('or')):
    """
    Join a list into a human-readable enumeration.

    >>> get_text_list(['a', 'b', 'c', 'd'])
    'a, b, c or d'
    >>> get_text_list(['a', 'b', 'c'], 'and')
    'a, b and c'
    >>> get_text_list(['a', 'b'], 'and')
    'a and b'
    >>> get_text_list(['a'])
    'a'
    >>> get_text_list([])
    ''
    """
    if not list_:
        return ''
    if len(list_) == 1:
        return str(list_[0])
    # Translators: This string is used as a separator between list elements
    separator = _(', ')
    initial_items = separator.join(str(item) for item in list_[:-1])
    return '%s %s %s' % (initial_items, str(last_word), str(list_[-1]))
|
||||
|
||||
|
||||
@keep_lazy_text
def normalize_newlines(text):
    """Normalize CRLF and CR newlines to just LF."""
    text = str(text)
    return re_newlines.sub('\n', text)
|
||||
|
||||
|
||||
@keep_lazy_text
def phone2numeric(phone):
    """Convert a phone number with letters into its numeric equivalent."""
    # Standard telephone keypad layout: abc=2, def=3, ghi=4, jkl=5, mno=6,
    # pqrs=7, tuv=8, wxyz=9.
    keypad = dict(zip(
        'abcdefghijklmnopqrstuvwxyz',
        '22233344455566677778889999',
    ))
    return ''.join(keypad.get(char, char) for char in phone.lower())
|
||||
|
||||
|
||||
# From http://www.xhaus.com/alan/python/httpcomp.html#gzip
# Used with permission.
def compress_string(s):
    """Gzip-compress the bytestring `s` and return the compressed bytes."""
    buf = BytesIO()
    # mtime=0 keeps the output deterministic for identical input.
    with GzipFile(mode='wb', compresslevel=6, fileobj=buf, mtime=0) as zfile:
        zfile.write(s)
    return buf.getvalue()
|
||||
|
||||
|
||||
class StreamingBuffer(BytesIO):
    """A BytesIO that empties itself each time it is read."""

    def read(self):
        # Hand back everything buffered so far, then reset to empty.
        data = self.getvalue()
        self.seek(0)
        self.truncate()
        return data
|
||||
|
||||
|
||||
# Like compress_string, but for iterators of strings.
def compress_sequence(sequence):
    """Lazily gzip-compress an iterable of bytestrings, yielding chunks."""
    buf = StreamingBuffer()
    with GzipFile(mode='wb', compresslevel=6, fileobj=buf, mtime=0) as gzip_file:
        # Output headers...
        yield buf.read()
        for item in sequence:
            gzip_file.write(item)
            chunk = buf.read()
            if chunk:
                yield chunk
    # Closing the GzipFile flushes the trailer into the buffer.
    yield buf.read()
|
||||
|
||||
|
||||
# Expression to match some_token and some_token="with spaces" (and similarly
|
||||
# for single-quoted strings).
|
||||
smart_split_re = re.compile(r"""
|
||||
((?:
|
||||
[^\s'"]*
|
||||
(?:
|
||||
(?:"(?:[^"\\]|\\.)*" | '(?:[^'\\]|\\.)*')
|
||||
[^\s'"]*
|
||||
)+
|
||||
) | \S+)
|
||||
""", re.VERBOSE)
|
||||
|
||||
|
||||
def smart_split(text):
    r"""
    Generator that splits a string by spaces, leaving quoted phrases together.
    Supports both single and double quotes, and supports escaping quotes with
    backslashes. In the output, strings will keep their initial and trailing
    quote marks and escaped quotes will remain escaped (the results can then
    be further processed with unescape_string_literal()).

    >>> list(smart_split(r'This is "a person\'s" test.'))
    ['This', 'is', '"a person\\\'s"', 'test.']
    >>> list(smart_split(r"Another 'person\'s' test."))
    ['Another', "'person\\'s'", 'test.']
    >>> list(smart_split(r'A "\"funky\" style" test.'))
    ['A', '"\\"funky\\" style"', 'test.']
    """
    text = str(text)
    for match in smart_split_re.finditer(text):
        yield match.group(0)
|
||||
|
||||
|
||||
def _replace_entity(match):
|
||||
text = match.group(1)
|
||||
if text[0] == '#':
|
||||
text = text[1:]
|
||||
try:
|
||||
if text[0] in 'xX':
|
||||
c = int(text[1:], 16)
|
||||
else:
|
||||
c = int(text)
|
||||
return chr(c)
|
||||
except ValueError:
|
||||
return match.group(0)
|
||||
else:
|
||||
try:
|
||||
return chr(html.entities.name2codepoint[text])
|
||||
except KeyError:
|
||||
return match.group(0)
|
||||
|
||||
|
||||
_entity_re = re.compile(r"&(#?[xX]?(?:[0-9a-fA-F]+|\w{1,8}));")
|
||||
|
||||
|
||||
@keep_lazy_text
def unescape_entities(text):
    """Deprecated: replace HTML entities in `text`; use html.unescape()."""
    warnings.warn(
        'django.utils.text.unescape_entities() is deprecated in favor of '
        'html.unescape().',
        RemovedInDjango40Warning, stacklevel=2,
    )
    text = str(text)
    return _entity_re.sub(_replace_entity, text)
|
||||
|
||||
|
||||
@keep_lazy_text
def unescape_string_literal(s):
    r"""
    Convert quoted string literals to unquoted strings with escaped quotes and
    backslashes unquoted::

        >>> unescape_string_literal('"abc"')
        'abc'
        >>> unescape_string_literal("'abc'")
        'abc'
        >>> unescape_string_literal('"a \"bc\""')
        'a "bc"'
        >>> unescape_string_literal("'\'ab\' c'")
        "'ab' c"
    """
    if s[0] not in "\"'" or s[-1] != s[0]:
        raise ValueError("Not a string literal: %r" % s)
    quote = s[0]
    inner = s[1:-1]
    return inner.replace('\\' + quote, quote).replace('\\\\', '\\')
|
||||
|
||||
|
||||
@keep_lazy_text
def slugify(value, allow_unicode=False):
    """
    Convert to ASCII if 'allow_unicode' is False. Convert spaces to hyphens.
    Remove characters that aren't alphanumerics, underscores, or hyphens.
    Convert to lowercase. Also strip leading and trailing whitespace.
    """
    text = str(value)
    if allow_unicode:
        text = unicodedata.normalize('NFKC', text)
    else:
        text = unicodedata.normalize('NFKD', text).encode('ascii', 'ignore').decode('ascii')
    # Drop everything but word chars, whitespace, and hyphens, then collapse
    # runs of whitespace/hyphens into single hyphens.
    text = re.sub(r'[^\w\s-]', '', text)
    return re.sub(r'[-\s]+', '-', text.strip().lower())
|
||||
|
||||
|
||||
def camel_case_to_spaces(value):
    """
    Split CamelCase and convert to lowercase. Strip surrounding whitespace.
    """
    spaced = re_camel_case.sub(r' \1', value)
    return spaced.strip().lower()
|
||||
|
||||
|
||||
def _format_lazy(format_string, *args, **kwargs):
|
||||
"""
|
||||
Apply str.format() on 'format_string' where format_string, args,
|
||||
and/or kwargs might be lazy.
|
||||
"""
|
||||
return format_string.format(*args, **kwargs)
|
||||
|
||||
|
||||
format_lazy = lazy(_format_lazy, str)
|
||||
91
venv/lib/python3.8/site-packages/django/utils/timesince.py
Normal file
91
venv/lib/python3.8/site-packages/django/utils/timesince.py
Normal file
@@ -0,0 +1,91 @@
|
||||
import calendar
|
||||
import datetime
|
||||
|
||||
from django.utils.html import avoid_wrapping
|
||||
from django.utils.timezone import is_aware, utc
|
||||
from django.utils.translation import gettext, ngettext_lazy
|
||||
|
||||
# Translatable unit names; pluralized lazily when formatted with a count.
TIME_STRINGS = {
    'year': ngettext_lazy('%d year', '%d years'),
    'month': ngettext_lazy('%d month', '%d months'),
    'week': ngettext_lazy('%d week', '%d weeks'),
    'day': ngettext_lazy('%d day', '%d days'),
    'hour': ngettext_lazy('%d hour', '%d hours'),
    'minute': ngettext_lazy('%d minute', '%d minutes'),
}

# (seconds per unit, unit name), largest first. A month is approximated as
# 30 days and a year as 365 days.
TIMESINCE_CHUNKS = (
    (60 * 60 * 24 * 365, 'year'),
    (60 * 60 * 24 * 30, 'month'),
    (60 * 60 * 24 * 7, 'week'),
    (60 * 60 * 24, 'day'),
    (60 * 60, 'hour'),
    (60, 'minute'),
)
|
||||
|
||||
|
||||
def timesince(d, now=None, reversed=False, time_strings=None):
    """
    Take two datetime objects and return the time between d and now as a nicely
    formatted string, e.g. "10 minutes". If d occurs after now, return
    "0 minutes".

    Units used are years, months, weeks, days, hours, and minutes.
    Seconds and microseconds are ignored. Up to two adjacent units will be
    displayed. For example, "2 weeks, 3 days" and "1 year, 3 months" are
    possible outputs, but "2 weeks, 3 hours" and "1 year, 5 days" are not.

    `time_strings` is an optional dict of strings to replace the default
    TIME_STRINGS dict.

    Adapted from
    https://web.archive.org/web/20060617175230/http://blog.natbat.co.uk/archive/2003/Jun/14/time_since
    """
    if time_strings is None:
        time_strings = TIME_STRINGS

    # Convert datetime.date to datetime.datetime for comparison.
    if not isinstance(d, datetime.datetime):
        d = datetime.datetime(d.year, d.month, d.day)
    if now and not isinstance(now, datetime.datetime):
        now = datetime.datetime(now.year, now.month, now.day)

    # Default `now` to the current time, timezone-aware only if `d` is aware.
    now = now or datetime.datetime.now(utc if is_aware(d) else None)

    if reversed:
        d, now = now, d
    delta = now - d

    # Deal with leap years by subtracting the number of leap days.
    leapdays = calendar.leapdays(d.year, now.year)
    if leapdays != 0:
        if calendar.isleap(d.year):
            leapdays -= 1
        elif calendar.isleap(now.year):
            leapdays += 1
    delta -= datetime.timedelta(leapdays)

    # ignore microseconds
    since = delta.days * 24 * 60 * 60 + delta.seconds
    if since <= 0:
        # d is in the future compared to now, stop processing.
        return avoid_wrapping(gettext('0 minutes'))
    # Find the largest unit with a non-zero count.
    for i, (seconds, name) in enumerate(TIMESINCE_CHUNKS):
        count = since // seconds
        if count != 0:
            break
    result = avoid_wrapping(time_strings[name] % count)
    if i + 1 < len(TIMESINCE_CHUNKS):
        # Now get the second item
        seconds2, name2 = TIMESINCE_CHUNKS[i + 1]
        count2 = (since - (seconds * count)) // seconds2
        if count2 != 0:
            result += gettext(', ') + avoid_wrapping(time_strings[name2] % count2)
    return result
|
||||
|
||||
|
||||
def timeuntil(d, now=None, time_strings=None):
    """
    Like timesince, but return a string measuring the time until the given time.

    Delegates to timesince() with reversed=True so the delta is computed as
    d - now instead of now - d.
    """
    return timesince(d, now, reversed=True, time_strings=time_strings)
|
||||
287
venv/lib/python3.8/site-packages/django/utils/timezone.py
Normal file
287
venv/lib/python3.8/site-packages/django/utils/timezone.py
Normal file
@@ -0,0 +1,287 @@
|
||||
"""
|
||||
Timezone-related classes and functions.
|
||||
"""
|
||||
|
||||
import functools
|
||||
import warnings
|
||||
from contextlib import ContextDecorator
|
||||
from datetime import datetime, timedelta, timezone, tzinfo
|
||||
|
||||
import pytz
|
||||
from asgiref.local import Local
|
||||
|
||||
from django.conf import settings
|
||||
from django.utils.deprecation import RemovedInDjango31Warning
|
||||
|
||||
__all__ = [
|
||||
'utc', 'get_fixed_timezone',
|
||||
'get_default_timezone', 'get_default_timezone_name',
|
||||
'get_current_timezone', 'get_current_timezone_name',
|
||||
'activate', 'deactivate', 'override',
|
||||
'localtime', 'now',
|
||||
'is_aware', 'is_naive', 'make_aware', 'make_naive',
|
||||
]
|
||||
|
||||
|
||||
# UTC and local time zones
|
||||
|
||||
ZERO = timedelta(0)
|
||||
|
||||
|
||||
class FixedOffset(tzinfo):
    """
    Fixed offset in minutes east from UTC. Taken from Python's docs.

    Kept as close as possible to the reference version. __init__ was changed
    to make its arguments optional, according to Python's requirement that
    tzinfo subclasses can be instantiated without arguments.
    """

    def __init__(self, offset=None, name=None):
        # Deprecated: datetime.timezone covers the same use case.
        warnings.warn(
            'FixedOffset is deprecated in favor of datetime.timezone',
            RemovedInDjango31Warning, stacklevel=2,
        )
        if offset is not None:
            self.__offset = timedelta(minutes=offset)
        if name is not None:
            self.__name = name

    def utcoffset(self, dt):
        # NOTE(review): the attribute is only set when an offset was passed to
        # __init__; instances built without one raise AttributeError here.
        return self.__offset

    def tzname(self, dt):
        return self.__name

    def dst(self, dt):
        # A fixed-offset zone never observes DST.
        return ZERO
|
||||
|
||||
|
||||
# UTC time zone as a tzinfo instance.
|
||||
utc = pytz.utc
|
||||
|
||||
|
||||
def get_fixed_timezone(offset):
    """
    Return a tzinfo instance with a fixed offset from UTC.

    ``offset`` is a number of minutes east of UTC, or a timedelta.
    The zone is named after its offset, e.g. '+0530' or '-0100'.
    """
    if isinstance(offset, timedelta):
        # Normalize a timedelta to whole minutes.
        offset = offset.total_seconds() // 60
    hours, minutes = divmod(abs(offset), 60)
    name = '%s%02d%02d' % ('-' if offset < 0 else '+', hours, minutes)
    return timezone(timedelta(minutes=offset), name)
|
||||
|
||||
|
||||
# In order to avoid accessing settings at compile time,
|
||||
# wrap the logic in a function and cache the result.
|
||||
@functools.lru_cache()
def get_default_timezone():
    """
    Return the default time zone as a tzinfo instance.

    This is the time zone defined by settings.TIME_ZONE.
    """
    # lru_cache: settings.TIME_ZONE is read once; the pytz lookup is cached
    # for the life of the process.
    return pytz.timezone(settings.TIME_ZONE)
|
||||
|
||||
|
||||
# This function exists for consistency with get_current_timezone_name
|
||||
def get_default_timezone_name():
    """Return the name of the default time zone."""
    # Mirrors get_current_timezone_name() for the settings-defined zone.
    return _get_timezone_name(get_default_timezone())
|
||||
|
||||
|
||||
_active = Local()
|
||||
|
||||
|
||||
def get_current_timezone():
    """Return the currently active time zone as a tzinfo instance."""
    # NOTE: the getattr default is evaluated eagerly, so
    # get_default_timezone() runs (and is cached) even when a zone is active.
    return getattr(_active, "value", get_default_timezone())
|
||||
|
||||
|
||||
def get_current_timezone_name():
    """Return the name of the currently active time zone."""
    return _get_timezone_name(get_current_timezone())
|
||||
|
||||
|
||||
def _get_timezone_name(timezone):
|
||||
"""Return the name of ``timezone``."""
|
||||
return timezone.tzname(None)
|
||||
|
||||
# Timezone selection functions.
|
||||
|
||||
# These functions don't change os.environ['TZ'] and call time.tzset()
|
||||
# because it isn't thread safe.
|
||||
|
||||
|
||||
def activate(timezone):
    """
    Set the time zone for the current thread.

    The ``timezone`` argument must be an instance of a tzinfo subclass or a
    time zone name.
    """
    # Guard-clause form: store and return as soon as a supported type matches.
    if isinstance(timezone, tzinfo):
        _active.value = timezone
        return
    if isinstance(timezone, str):
        # Resolve a zone name (e.g. 'Europe/Paris') through pytz.
        _active.value = pytz.timezone(timezone)
        return
    raise ValueError("Invalid timezone: %r" % timezone)
|
||||
|
||||
|
||||
def deactivate():
    """
    Unset the time zone for the current thread.

    Django will then use the time zone defined by settings.TIME_ZONE.
    """
    # EAFP: deleting an unset value is a no-op.
    try:
        del _active.value
    except AttributeError:
        pass
|
||||
|
||||
|
||||
class override(ContextDecorator):
    """
    Temporarily set the time zone for the current thread.

    This is a context manager that uses django.utils.timezone.activate()
    to set the timezone on entry and restores the previously active timezone
    on exit.

    The ``timezone`` argument must be an instance of a ``tzinfo`` subclass, a
    time zone name, or ``None``. If it is ``None``, Django enables the default
    time zone.
    """
    def __init__(self, timezone):
        self.timezone = timezone

    def __enter__(self):
        # Remember what was active so __exit__ can restore it.
        self.old_timezone = getattr(_active, 'value', None)
        if self.timezone is not None:
            activate(self.timezone)
        else:
            deactivate()

    def __exit__(self, exc_type, exc_value, traceback):
        if self.old_timezone is not None:
            _active.value = self.old_timezone
        else:
            deactivate()
|
||||
|
||||
|
||||
# Templates
|
||||
|
||||
def template_localtime(value, use_tz=None):
    """
    Check if value is a datetime and converts it to local time if necessary.

    If use_tz is provided and is not None, that will force the value to
    be converted (or not), overriding the value of settings.USE_TZ.

    This function is designed for use by the template engine.
    """
    # Non-datetimes pass through untouched; settings are only consulted for
    # datetimes (preserves the original short-circuit order).
    if isinstance(value, datetime):
        convert = settings.USE_TZ if use_tz is None else use_tz
        if convert and not is_naive(value) and getattr(value, 'convert_to_local_time', True):
            return localtime(value)
    return value
|
||||
|
||||
|
||||
# Utilities
|
||||
|
||||
def localtime(value=None, timezone=None):
    """
    Convert an aware datetime.datetime to local time.

    Only aware datetimes are allowed. When value is omitted, it defaults to
    now().

    Local time is defined by the current time zone, unless another time zone
    is specified.

    Raise ValueError for naive datetimes.
    """
    source = now() if value is None else value
    target = get_current_timezone() if timezone is None else timezone
    # Emulate the behavior of astimezone() on Python < 3.6.
    if is_naive(source):
        raise ValueError("localtime() cannot be applied to a naive datetime")
    return source.astimezone(target)
|
||||
|
||||
|
||||
def localdate(value=None, timezone=None):
    """
    Convert an aware datetime to local time and return the value's date.

    Only aware datetimes are allowed. When value is omitted, it defaults to
    now().

    Local time is defined by the current time zone, unless another time zone is
    specified.
    """
    # localtime() performs the awareness check and raises for naive input.
    return localtime(value, timezone).date()
|
||||
|
||||
|
||||
def now():
    """
    Return an aware or naive datetime.datetime, depending on settings.USE_TZ.
    """
    if not settings.USE_TZ:
        return datetime.now()
    # timeit shows that datetime.now(tz=utc) is 24% slower
    return datetime.utcnow().replace(tzinfo=utc)
|
||||
|
||||
|
||||
# By design, these four functions don't perform any checks on their arguments.
|
||||
# The caller should ensure that they don't receive an invalid value like None.
|
||||
|
||||
def is_aware(value):
    """
    Determine if a given datetime.datetime is aware.

    The concept is defined in Python's docs:
    https://docs.python.org/library/datetime.html#datetime.tzinfo

    Assuming value.tzinfo is either None or a proper datetime.tzinfo,
    value.utcoffset() implements the appropriate logic.
    """
    offset = value.utcoffset()
    return offset is not None
|
||||
|
||||
|
||||
def is_naive(value):
    """
    Determine if a given datetime.datetime is naive.

    The concept is defined in Python's docs:
    https://docs.python.org/library/datetime.html#datetime.tzinfo

    Assuming value.tzinfo is either None or a proper datetime.tzinfo,
    value.utcoffset() implements the appropriate logic.
    """
    offset = value.utcoffset()
    return offset is None
|
||||
|
||||
|
||||
def make_aware(value, timezone=None, is_dst=None):
    """
    Make a naive datetime.datetime in a given time zone aware.

    When ``timezone`` is omitted, the currently active time zone is used.
    ``is_dst`` is forwarded to pytz zones to disambiguate DST transitions.
    """
    tz = get_current_timezone() if timezone is None else timezone
    if hasattr(tz, 'localize'):
        # This method is available for pytz time zones.
        return tz.localize(value, is_dst=is_dst)
    # Check that we won't overwrite the timezone of an aware datetime.
    if is_aware(value):
        raise ValueError(
            "make_aware expects a naive datetime, got %s" % value)
    # This may be wrong around DST changes!
    return value.replace(tzinfo=tz)
|
||||
|
||||
|
||||
def make_naive(value, timezone=None):
    """
    Make an aware datetime.datetime naive in a given time zone.

    Raise ValueError for naive input (mirrors astimezone() on Python < 3.6).
    """
    if is_naive(value):
        raise ValueError("make_naive() cannot be applied to a naive datetime")
    tz = get_current_timezone() if timezone is None else timezone
    return value.astimezone(tz).replace(tzinfo=None)
|
||||
@@ -0,0 +1,36 @@
|
||||
class CyclicDependencyError(ValueError):
    # Raised by topological_sort_as_sets() when the dependency graph
    # contains a cycle and no further progress can be made.
    pass
|
||||
|
||||
|
||||
def topological_sort_as_sets(dependency_graph):
    """
    Variation of Kahn's algorithm (1962) that returns sets.

    Take a dependency graph as a dictionary of node => dependencies.

    Yield sets of items in topological order, where the first set contains
    all nodes without dependencies, and each following set contains all
    nodes that may depend on the nodes only in the previously yielded sets.

    Raise CyclicDependencyError when the graph contains a cycle.
    """
    remaining = dict(dependency_graph)
    while remaining:
        # Nodes with no outstanding dependencies form the next layer.
        ready = {node for node, deps in remaining.items() if not deps}
        if not ready:
            # No progress possible: everything left is part of a cycle.
            raise CyclicDependencyError('Cyclic dependency in graph: {}'.format(
                ', '.join(repr(x) for x in remaining.items())))
        yield ready
        # Drop the yielded nodes and remove them from remaining dependencies.
        remaining = {
            node: deps - ready
            for node, deps in remaining.items() if node not in ready
        }
|
||||
|
||||
|
||||
def stable_topological_sort(l, dependency_graph):
    """
    Return the items of *l* in topological order per *dependency_graph*,
    preserving the input order of *l* within each dependency layer.
    """
    ordered = []
    for layer in topological_sort_as_sets(dependency_graph):
        ordered.extend(node for node in l if node in layer)
    return ordered
|
||||
@@ -0,0 +1,339 @@
|
||||
"""
|
||||
Internationalization support.
|
||||
"""
|
||||
import re
|
||||
import warnings
|
||||
from contextlib import ContextDecorator
|
||||
from decimal import ROUND_UP, Decimal
|
||||
|
||||
from django.utils.autoreload import autoreload_started, file_changed
|
||||
from django.utils.deprecation import RemovedInDjango40Warning
|
||||
from django.utils.functional import lazy
|
||||
|
||||
__all__ = [
|
||||
'activate', 'deactivate', 'override', 'deactivate_all',
|
||||
'get_language', 'get_language_from_request',
|
||||
'get_language_info', 'get_language_bidi',
|
||||
'check_for_language', 'to_language', 'to_locale', 'templatize',
|
||||
'gettext', 'gettext_lazy', 'gettext_noop',
|
||||
'ugettext', 'ugettext_lazy', 'ugettext_noop',
|
||||
'ngettext', 'ngettext_lazy',
|
||||
'ungettext', 'ungettext_lazy',
|
||||
'pgettext', 'pgettext_lazy',
|
||||
'npgettext', 'npgettext_lazy',
|
||||
'LANGUAGE_SESSION_KEY',
|
||||
]
|
||||
|
||||
LANGUAGE_SESSION_KEY = '_language'
|
||||
|
||||
|
||||
class TranslatorCommentWarning(SyntaxWarning):
    # Emitted by templatize() when a translator-targeted comment is ignored
    # because it wasn't the last item on its line.
    pass
|
||||
|
||||
|
||||
# Here be dragons, so a short explanation of the logic won't hurt:
|
||||
# We are trying to solve two problems: (1) access settings, in particular
|
||||
# settings.USE_I18N, as late as possible, so that modules can be imported
|
||||
# without having to first configure Django, and (2) if some other code creates
|
||||
# a reference to one of these functions, don't break that reference when we
|
||||
# replace the functions with their real counterparts (once we do access the
|
||||
# settings).
|
||||
|
||||
class Trans:
    """
    The purpose of this class is to store the actual translation function upon
    receiving the first call to that function. After this is done, changes to
    USE_I18N will have no effect to which function is served upon request. If
    your tests rely on changing USE_I18N, you can delete all the functions
    from _trans.__dict__.

    Note that storing the function with setattr will have a noticeable
    performance effect, as access to the function goes the normal path,
    instead of using __getattr__.
    """

    def __getattr__(self, real_name):
        # Imported lazily so this module can be imported before settings are
        # configured.
        from django.conf import settings
        if settings.USE_I18N:
            from django.utils.translation import trans_real as trans
            from django.utils.translation.reloader import watch_for_translation_changes, translation_file_changed
            # Wire translation-file watching into the autoreloader; the
            # dispatch_uid prevents duplicate connections when __getattr__
            # fires more than once.
            autoreload_started.connect(watch_for_translation_changes, dispatch_uid='translation_file_changed')
            file_changed.connect(translation_file_changed, dispatch_uid='translation_file_changed')
        else:
            from django.utils.translation import trans_null as trans
        # Cache the resolved function on the instance so subsequent accesses
        # bypass __getattr__ entirely.
        setattr(self, real_name, getattr(trans, real_name))
        return getattr(trans, real_name)
|
||||
|
||||
|
||||
_trans = Trans()
|
||||
|
||||
# The Trans class is no more needed, so remove it from the namespace.
|
||||
del Trans
|
||||
|
||||
|
||||
def gettext_noop(message):
    """Mark *message* for translation without translating it now; delegates to the active backend."""
    return _trans.gettext_noop(message)
|
||||
|
||||
|
||||
def ugettext_noop(message):
    """
    A legacy compatibility wrapper for Unicode handling on Python 2.
    Alias of gettext_noop() since Django 2.0.
    """
    # Deprecated alias: warn, then delegate.
    warnings.warn(
        'django.utils.translation.ugettext_noop() is deprecated in favor of '
        'django.utils.translation.gettext_noop().',
        RemovedInDjango40Warning, stacklevel=2,
    )
    return gettext_noop(message)
|
||||
|
||||
|
||||
def gettext(message):
    """Translate *message* using the backend selected by settings.USE_I18N."""
    return _trans.gettext(message)
|
||||
|
||||
|
||||
def ugettext(message):
    """
    A legacy compatibility wrapper for Unicode handling on Python 2.
    Alias of gettext() since Django 2.0.
    """
    # Deprecated alias: warn, then delegate.
    warnings.warn(
        'django.utils.translation.ugettext() is deprecated in favor of '
        'django.utils.translation.gettext().',
        RemovedInDjango40Warning, stacklevel=2,
    )
    return gettext(message)
|
||||
|
||||
|
||||
def ngettext(singular, plural, number):
    """Pluralized translation of *singular*/*plural* for *number*, via the active backend."""
    return _trans.ngettext(singular, plural, number)
|
||||
|
||||
|
||||
def ungettext(singular, plural, number):
    """
    A legacy compatibility wrapper for Unicode handling on Python 2.
    Alias of ngettext() since Django 2.0.
    """
    # Deprecated alias: warn, then delegate.
    warnings.warn(
        'django.utils.translation.ungettext() is deprecated in favor of '
        'django.utils.translation.ngettext().',
        RemovedInDjango40Warning, stacklevel=2,
    )
    return ngettext(singular, plural, number)
|
||||
|
||||
|
||||
def pgettext(context, message):
    """Context-aware translation of *message*, via the active backend."""
    return _trans.pgettext(context, message)
|
||||
|
||||
|
||||
def npgettext(context, singular, plural, number):
    """Context-aware pluralized translation, via the active backend."""
    return _trans.npgettext(context, singular, plural, number)
|
||||
|
||||
|
||||
gettext_lazy = lazy(gettext, str)
|
||||
pgettext_lazy = lazy(pgettext, str)
|
||||
|
||||
|
||||
def ugettext_lazy(message):
    """
    A legacy compatibility wrapper for Unicode handling on Python 2.
    Alias of gettext_lazy() since Django 2.0.
    """
    # Deprecated alias: warn, then delegate.
    warnings.warn(
        'django.utils.translation.ugettext_lazy() is deprecated in favor of '
        'django.utils.translation.gettext_lazy().',
        RemovedInDjango40Warning, stacklevel=2,
    )
    return gettext_lazy(message)
|
||||
|
||||
|
||||
def lazy_number(func, resultclass, number=None, **kwargs):
    """
    Build a lazy pluralization proxy around *func* (e.g. ngettext).

    If *number* is a concrete int, the plural form is fixed now and a plain
    lazy proxy is returned. Otherwise *number* names the key (or positional
    slot) that will carry the count at interpolation time, and a string
    subclass defers choosing singular/plural until % or .format() is applied.
    """
    if isinstance(number, int):
        # The count is already known: resolve the plural form lazily but
        # with a fixed number.
        kwargs['number'] = number
        proxy = lazy(func, resultclass)(**kwargs)
    else:
        # Keep a pristine copy for pickling (kwargs is mutated below).
        original_kwargs = kwargs.copy()

        class NumberAwareString(resultclass):
            def __bool__(self):
                return bool(kwargs['singular'])

            def _get_number_value(self, values):
                # Look up the count under the key named by *number*.
                try:
                    return values[number]
                except KeyError:
                    raise KeyError(
                        "Your dictionary lacks key '%s\'. Please provide "
                        "it, because it is required to determine whether "
                        "string is singular or plural." % number
                    )

            def _translate(self, number_value):
                # NOTE: mutates the closed-over kwargs shared by this proxy.
                kwargs['number'] = number_value
                return func(**kwargs)

            def format(self, *args, **kwargs):
                # This *kwargs* shadows the enclosing lazy_number kwargs on
                # purpose: it holds the .format() arguments.
                number_value = self._get_number_value(kwargs) if kwargs and number else args[0]
                return self._translate(number_value).format(*args, **kwargs)

            def __mod__(self, rhs):
                if isinstance(rhs, dict) and number:
                    number_value = self._get_number_value(rhs)
                else:
                    number_value = rhs
                translated = self._translate(number_value)
                try:
                    translated = translated % rhs
                except TypeError:
                    # String doesn't contain a placeholder for the number.
                    pass
                return translated

        proxy = lazy(lambda **kwargs: NumberAwareString(), NumberAwareString)(**kwargs)
        # Make the proxy picklable despite the local class.
        proxy.__reduce__ = lambda: (_lazy_number_unpickle, (func, resultclass, number, original_kwargs))
    return proxy
|
||||
|
||||
|
||||
def _lazy_number_unpickle(func, resultclass, number, kwargs):
    """Rebuild a lazy_number() proxy when unpickling (see proxy.__reduce__)."""
    return lazy_number(func, resultclass, number=number, **kwargs)
|
||||
|
||||
|
||||
def ngettext_lazy(singular, plural, number=None):
    """Lazily pluralized translation; *number* may be an int or a key name (see lazy_number)."""
    return lazy_number(ngettext, str, singular=singular, plural=plural, number=number)
|
||||
|
||||
|
||||
def ungettext_lazy(singular, plural, number=None):
    """
    A legacy compatibility wrapper for Unicode handling on Python 2.
    Alias of ngettext_lazy() since Django 2.0.
    """
    # Deprecated alias: warn, then delegate.
    warnings.warn(
        'django.utils.translation.ungettext_lazy() is deprecated in favor of '
        'django.utils.translation.ngettext_lazy().',
        RemovedInDjango40Warning, stacklevel=2,
    )
    return ngettext_lazy(singular, plural, number)
|
||||
|
||||
|
||||
def npgettext_lazy(context, singular, plural, number=None):
    """Lazily pluralized, context-aware translation (see lazy_number)."""
    return lazy_number(npgettext, str, context=context, singular=singular, plural=plural, number=number)
|
||||
|
||||
|
||||
def activate(language):
    """Activate *language* for the current thread, via the active backend."""
    return _trans.activate(language)
|
||||
|
||||
|
||||
def deactivate():
    """Deactivate the currently active language, via the active backend."""
    return _trans.deactivate()
|
||||
|
||||
|
||||
class override(ContextDecorator):
    """
    Temporarily activate *language* for the current thread.

    A ``language`` of ``None`` disables translation entirely while the
    context is active. With ``deactivate=True``, leave translation
    deactivated on exit instead of restoring the previous language.
    """
    def __init__(self, language, deactivate=False):
        self.language = language
        self.deactivate = deactivate

    def __enter__(self):
        # Remember the previous language so __exit__ can restore it.
        self.old_language = get_language()
        if self.language is None:
            deactivate_all()
        else:
            activate(self.language)

    def __exit__(self, exc_type, exc_value, traceback):
        if self.old_language is None:
            deactivate_all()
        elif self.deactivate:
            deactivate()
        else:
            activate(self.old_language)
|
||||
|
||||
|
||||
def get_language():
    """Return the currently selected language code, via the active backend."""
    return _trans.get_language()
|
||||
|
||||
|
||||
def get_language_bidi():
    """Return whether the current language is right-to-left, via the active backend."""
    return _trans.get_language_bidi()
|
||||
|
||||
|
||||
def check_for_language(lang_code):
    """Return whether a translation exists for *lang_code*, via the active backend."""
    return _trans.check_for_language(lang_code)
|
||||
|
||||
|
||||
def to_language(locale):
    """Turn a locale name (en_US) into a language name (en-us)."""
    # Split on the first underscore; lowercase both halves.
    lang, sep, country = locale.partition('_')
    if sep:
        return lang.lower() + '-' + country.lower()
    return locale.lower()
|
||||
|
||||
|
||||
def to_locale(language):
    """Turn a language name (en-us) into a locale name (en_US)."""
    lang, _, rest = language.lower().partition('-')
    if not rest:
        return lang
    # A region with > 2 characters only has its first character capitalized
    # (sr-latn -> sr_Latn); a 2-character region is fully uppercased
    # (en-us -> en_US). Anything after a second dash is kept verbatim.
    region, _, tail = rest.partition('-')
    region = region.upper() if len(region) <= 2 else region.title()
    if tail:
        region = region + '-' + tail
    return '%s_%s' % (lang, region)
|
||||
|
||||
|
||||
def get_language_from_request(request, check_path=False):
    """Determine the language for *request*, via the active backend."""
    return _trans.get_language_from_request(request, check_path)
|
||||
|
||||
|
||||
def get_language_from_path(path):
    """Extract a language code from a URL *path*, via the active backend."""
    return _trans.get_language_from_path(path)
|
||||
|
||||
|
||||
def get_supported_language_variant(lang_code, *, strict=False):
    """Return a supported variant of *lang_code*, via the active backend."""
    return _trans.get_supported_language_variant(lang_code, strict)
|
||||
|
||||
|
||||
def templatize(src, **kwargs):
    """Turn a Django template into xgettext-readable source; thin wrapper around .template.templatize."""
    # Imported lazily to avoid pulling in the template machinery at import time.
    from .template import templatize
    return templatize(src, **kwargs)
|
||||
|
||||
|
||||
def deactivate_all():
    """Disable translation entirely for the current thread, via the active backend."""
    return _trans.deactivate_all()
|
||||
|
||||
|
||||
def get_language_info(lang_code):
    """
    Return the LANG_INFO entry for *lang_code*, following one level of
    'fallback' indirection and attaching a lazily translated
    'name_translated' key.

    Raise KeyError when neither the full code nor its generic prefix
    (the part before '-') is known.
    """
    from django.conf.locale import LANG_INFO
    try:
        lang_info = LANG_INFO[lang_code]
        # Entries with only a 'fallback' key delegate to another code.
        if 'fallback' in lang_info and 'name' not in lang_info:
            info = get_language_info(lang_info['fallback'][0])
        else:
            info = lang_info
    except KeyError:
        if '-' not in lang_code:
            raise KeyError("Unknown language code %s." % lang_code)
        # Retry with the generic code, e.g. 'es' for 'es-ar'.
        generic_lang_code = lang_code.split('-')[0]
        try:
            info = LANG_INFO[generic_lang_code]
        except KeyError:
            raise KeyError("Unknown language code %s and %s." % (lang_code, generic_lang_code))

    if info:
        # NOTE: mutates the shared LANG_INFO dict entry in place.
        info['name_translated'] = gettext_lazy(info['name'])
    return info
|
||||
|
||||
|
||||
trim_whitespace_re = re.compile(r'\s*\n\s*')
|
||||
|
||||
|
||||
def trim_whitespace(s):
    """Strip *s* and collapse each newline (with surrounding whitespace) to a single space."""
    return re.sub(r'\s*\n\s*', ' ', s.strip())
|
||||
|
||||
|
||||
def round_away_from_one(value):
    """Round *value* to the nearest integer away from 1 (e.g. 2.5 -> 3, 0.5 -> 0)."""
    # Shift so that 1 maps to 0, round away from zero, then shift back.
    shifted = Decimal(value - 1).quantize(Decimal('0'), rounding=ROUND_UP)
    return int(shifted) + 1
|
||||
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
@@ -0,0 +1,29 @@
|
||||
from pathlib import Path
|
||||
|
||||
from asgiref.local import Local
|
||||
|
||||
from django.apps import apps
|
||||
|
||||
|
||||
def watch_for_translation_changes(sender, **kwargs):
    """Register file watchers for .mo files in potential locale paths."""
    # Imported here so this module can load before settings are configured.
    from django.conf import settings

    if not settings.USE_I18N:
        return
    # Candidate locale dirs: CWD-relative, per-app, and settings-defined.
    directories = [Path('locale')]
    directories += [Path(config.path) / 'locale' for config in apps.get_app_configs()]
    directories += [Path(p) for p in settings.LOCALE_PATHS]
    for path in directories:
        sender.watch_dir(path, '**/*.mo')
|
||||
|
||||
|
||||
def translation_file_changed(sender, file_path, **kwargs):
    """Clear the internal translations cache if a .mo file is modified."""
    if file_path.suffix != '.mo':
        # Not a compiled catalog; let other handlers process the event.
        return
    import gettext
    from django.utils.translation import trans_real
    # Reset both the stdlib gettext cache and Django's own caches so the
    # changed catalog is reloaded on next use.
    gettext._translations = {}
    trans_real._translations = {}
    trans_real._default = None
    trans_real._active = Local()
    return True
|
||||
@@ -0,0 +1,227 @@
|
||||
import re
|
||||
import warnings
|
||||
from io import StringIO
|
||||
|
||||
from django.template.base import TRANSLATOR_COMMENT_MARK, Lexer, TokenType
|
||||
|
||||
from . import TranslatorCommentWarning, trim_whitespace
|
||||
|
||||
dot_re = re.compile(r'\S')
|
||||
|
||||
|
||||
def blankout(src, char):
    """
    Change every non-whitespace character to the given char.
    Used in the templatize function.
    """
    return re.sub(r'\S', char, src)
|
||||
|
||||
|
||||
context_re = re.compile(r"""^\s+.*context\s+((?:"[^"]*?")|(?:'[^']*?'))\s*""")
|
||||
inline_re = re.compile(
|
||||
# Match the trans 'some text' part
|
||||
r"""^\s*trans\s+((?:"[^"]*?")|(?:'[^']*?'))"""
|
||||
# Match and ignore optional filters
|
||||
r"""(?:\s*\|\s*[^\s:]+(?::(?:[^\s'":]+|(?:"[^"]*?")|(?:'[^']*?')))?)*"""
|
||||
# Match the optional context part
|
||||
r"""(\s+.*context\s+((?:"[^"]*?")|(?:'[^']*?')))?\s*"""
|
||||
)
|
||||
block_re = re.compile(r"""^\s*blocktrans(\s+.*context\s+((?:"[^"]*?")|(?:'[^']*?')))?(?:\s+|$)""")
|
||||
endblock_re = re.compile(r"""^\s*endblocktrans$""")
|
||||
plural_re = re.compile(r"""^\s*plural$""")
|
||||
constant_re = re.compile(r"""_\(((?:".*?")|(?:'.*?'))\)""")
|
||||
|
||||
|
||||
def templatize(src, origin=None):
    """
    Turn a Django template into something that is understood by xgettext. It
    does so by translating the Django translation tags into standard gettext
    function invocations.
    """
    out = StringIO('')
    # Parser state: inside a {% blocktrans %} (intrans), its plural half
    # (inplural), or a {% comment %} block (incomment); trimmed mirrors the
    # blocktrans 'trimmed' option.
    message_context = None
    intrans = False
    inplural = False
    trimmed = False
    singular = []
    plural = []
    incomment = False
    comment = []
    # Translator comments collected per source line, flushed once the line
    # is known to end with the comment.
    lineno_comment_map = {}
    comment_lineno_cache = None
    # Adding the u prefix allows gettext to recognize the string (#26093).
    raw_prefix = 'u'

    # Concatenate collected token strings, optionally collapsing whitespace.
    def join_tokens(tokens, trim=False):
        message = ''.join(tokens)
        if trim:
            message = trim_whitespace(message)
        return message

    # Walk the template token stream and emit gettext-style pseudo-source.
    for t in Lexer(src).tokenize():
        if incomment:
            if t.token_type == TokenType.BLOCK and t.contents == 'endcomment':
                content = ''.join(comment)
                translators_comment_start = None
                for lineno, line in enumerate(content.splitlines(True)):
                    if line.lstrip().startswith(TRANSLATOR_COMMENT_MARK):
                        translators_comment_start = lineno
                for lineno, line in enumerate(content.splitlines(True)):
                    if translators_comment_start is not None and lineno >= translators_comment_start:
                        out.write(' # %s' % line)
                    else:
                        out.write(' #\n')
                incomment = False
                comment = []
            else:
                comment.append(t.contents)
        elif intrans:
            if t.token_type == TokenType.BLOCK:
                endbmatch = endblock_re.match(t.contents)
                pluralmatch = plural_re.match(t.contents)
                if endbmatch:
                    # End of blocktrans: emit n(p)gettext / (p)gettext call.
                    if inplural:
                        if message_context:
                            out.write(' npgettext({p}{!r}, {p}{!r}, {p}{!r},count) '.format(
                                message_context,
                                join_tokens(singular, trimmed),
                                join_tokens(plural, trimmed),
                                p=raw_prefix,
                            ))
                        else:
                            out.write(' ngettext({p}{!r}, {p}{!r}, count) '.format(
                                join_tokens(singular, trimmed),
                                join_tokens(plural, trimmed),
                                p=raw_prefix,
                            ))
                        for part in singular:
                            out.write(blankout(part, 'S'))
                        for part in plural:
                            out.write(blankout(part, 'P'))
                    else:
                        if message_context:
                            out.write(' pgettext({p}{!r}, {p}{!r}) '.format(
                                message_context,
                                join_tokens(singular, trimmed),
                                p=raw_prefix,
                            ))
                        else:
                            out.write(' gettext({p}{!r}) '.format(
                                join_tokens(singular, trimmed),
                                p=raw_prefix,
                            ))
                        for part in singular:
                            out.write(blankout(part, 'S'))
                    # Reset blocktrans state for the next tag.
                    message_context = None
                    intrans = False
                    inplural = False
                    singular = []
                    plural = []
                elif pluralmatch:
                    inplural = True
                else:
                    filemsg = ''
                    if origin:
                        filemsg = 'file %s, ' % origin
                    raise SyntaxError(
                        "Translation blocks must not include other block tags: "
                        "%s (%sline %d)" % (t.contents, filemsg, t.lineno)
                    )
            elif t.token_type == TokenType.VAR:
                # Variables become %(name)s placeholders in the message.
                if inplural:
                    plural.append('%%(%s)s' % t.contents)
                else:
                    singular.append('%%(%s)s' % t.contents)
            elif t.token_type == TokenType.TEXT:
                contents = t.contents.replace('%', '%%')
                if inplural:
                    plural.append(contents)
                else:
                    singular.append(contents)
        else:
            # Handle comment tokens (`{# ... #}`) plus other constructs on
            # the same line:
            if comment_lineno_cache is not None:
                cur_lineno = t.lineno + t.contents.count('\n')
                if comment_lineno_cache == cur_lineno:
                    if t.token_type != TokenType.COMMENT:
                        # Something followed the translator comment on the
                        # same line, so the comment must be discarded.
                        for c in lineno_comment_map[comment_lineno_cache]:
                            filemsg = ''
                            if origin:
                                filemsg = 'file %s, ' % origin
                            warn_msg = (
                                "The translator-targeted comment '%s' "
                                "(%sline %d) was ignored, because it wasn't "
                                "the last item on the line."
                            ) % (c, filemsg, comment_lineno_cache)
                            warnings.warn(warn_msg, TranslatorCommentWarning)
                        lineno_comment_map[comment_lineno_cache] = []
                else:
                    out.write('# %s' % ' | '.join(lineno_comment_map[comment_lineno_cache]))
                comment_lineno_cache = None

            if t.token_type == TokenType.BLOCK:
                imatch = inline_re.match(t.contents)
                bmatch = block_re.match(t.contents)
                cmatches = constant_re.findall(t.contents)
                if imatch:
                    # {% trans %}: emit a gettext/pgettext call inline.
                    g = imatch.group(1)
                    if g[0] == '"':
                        g = g.strip('"')
                    elif g[0] == "'":
                        g = g.strip("'")
                    g = g.replace('%', '%%')
                    if imatch.group(2):
                        # A context is provided
                        context_match = context_re.match(imatch.group(2))
                        message_context = context_match.group(1)
                        if message_context[0] == '"':
                            message_context = message_context.strip('"')
                        elif message_context[0] == "'":
                            message_context = message_context.strip("'")
                        out.write(' pgettext({p}{!r}, {p}{!r}) '.format(
                            message_context, g, p=raw_prefix
                        ))
                        message_context = None
                    else:
                        out.write(' gettext({p}{!r}) '.format(g, p=raw_prefix))
                elif bmatch:
                    # {% blocktrans %}: switch into message-collection mode.
                    for fmatch in constant_re.findall(t.contents):
                        out.write(' _(%s) ' % fmatch)
                    if bmatch.group(1):
                        # A context is provided
                        context_match = context_re.match(bmatch.group(1))
                        message_context = context_match.group(1)
                        if message_context[0] == '"':
                            message_context = message_context.strip('"')
                        elif message_context[0] == "'":
                            message_context = message_context.strip("'")
                    intrans = True
                    inplural = False
                    trimmed = 'trimmed' in t.split_contents()
                    singular = []
                    plural = []
                elif cmatches:
                    for cmatch in cmatches:
                        out.write(' _(%s) ' % cmatch)
                elif t.contents == 'comment':
                    incomment = True
                else:
                    out.write(blankout(t.contents, 'B'))
            elif t.token_type == TokenType.VAR:
                # {{ var|filter:_('...') }}: keep _() constants, blank the rest.
                parts = t.contents.split('|')
                cmatch = constant_re.match(parts[0])
                if cmatch:
                    out.write(' _(%s) ' % cmatch.group(1))
                for p in parts[1:]:
                    if p.find(':_(') >= 0:
                        out.write(' %s ' % p.split(':', 1)[1])
                    else:
                        out.write(blankout(p, 'F'))
            elif t.token_type == TokenType.COMMENT:
                if t.contents.lstrip().startswith(TRANSLATOR_COMMENT_MARK):
                    # Remember the comment; it is only emitted if nothing
                    # else follows on the same line (checked above).
                    lineno_comment_map.setdefault(t.lineno, []).append(t.contents)
                    comment_lineno_cache = t.lineno
            else:
                out.write(blankout(t.contents, 'X'))
    return out.getvalue()
|
||||
@@ -0,0 +1,67 @@
|
||||
# These are versions of the functions in django.utils.translation.trans_real
|
||||
# that don't actually do anything. This is purely for performance, so that
|
||||
# settings.USE_I18N = False can use this module rather than trans_real.py.
|
||||
|
||||
from django.conf import settings
|
||||
|
||||
|
||||
def gettext(message):
|
||||
return message
|
||||
|
||||
|
||||
gettext_noop = gettext_lazy = _ = gettext
|
||||
|
||||
|
||||
def ngettext(singular, plural, number):
|
||||
if number == 1:
|
||||
return singular
|
||||
return plural
|
||||
|
||||
|
||||
ngettext_lazy = ngettext
|
||||
|
||||
|
||||
def pgettext(context, message):
|
||||
return gettext(message)
|
||||
|
||||
|
||||
def npgettext(context, singular, plural, number):
|
||||
return ngettext(singular, plural, number)
|
||||
|
||||
|
||||
def activate(x):
|
||||
return None
|
||||
|
||||
|
||||
def deactivate():
|
||||
return None
|
||||
|
||||
|
||||
deactivate_all = deactivate
|
||||
|
||||
|
||||
def get_language():
|
||||
return settings.LANGUAGE_CODE
|
||||
|
||||
|
||||
def get_language_bidi():
|
||||
return settings.LANGUAGE_CODE in settings.LANGUAGES_BIDI
|
||||
|
||||
|
||||
def check_for_language(x):
|
||||
return True
|
||||
|
||||
|
||||
def get_language_from_request(request, check_path=False):
|
||||
return settings.LANGUAGE_CODE
|
||||
|
||||
|
||||
def get_language_from_path(request):
|
||||
return None
|
||||
|
||||
|
||||
def get_supported_language_variant(lang_code, strict=False):
|
||||
if lang_code == settings.LANGUAGE_CODE:
|
||||
return lang_code
|
||||
else:
|
||||
raise LookupError(lang_code)
|
||||
@@ -0,0 +1,579 @@
|
||||
"""Translation helper functions."""
|
||||
import functools
|
||||
import gettext as gettext_module
|
||||
import os
|
||||
import re
|
||||
import sys
|
||||
import warnings
|
||||
|
||||
from asgiref.local import Local
|
||||
|
||||
from django.apps import apps
|
||||
from django.conf import settings
|
||||
from django.conf.locale import LANG_INFO
|
||||
from django.core.exceptions import AppRegistryNotReady
|
||||
from django.core.signals import setting_changed
|
||||
from django.dispatch import receiver
|
||||
from django.utils.safestring import SafeData, mark_safe
|
||||
|
||||
from . import to_language, to_locale
|
||||
|
||||
# Translations are cached in a dictionary for every language.
|
||||
# The active translations are stored by threadid to make them thread local.
|
||||
_translations = {}
|
||||
_active = Local()
|
||||
|
||||
# The default translation is based on the settings file.
|
||||
_default = None
|
||||
|
||||
# magic gettext number to separate context from message
|
||||
CONTEXT_SEPARATOR = "\x04"
|
||||
|
||||
# Format of Accept-Language header values. From RFC 2616, section 14.4 and 3.9
|
||||
# and RFC 3066, section 2.1
|
||||
accept_language_re = re.compile(r'''
|
||||
([A-Za-z]{1,8}(?:-[A-Za-z0-9]{1,8})*|\*) # "en", "en-au", "x-y-z", "es-419", "*"
|
||||
(?:\s*;\s*q=(0(?:\.\d{,3})?|1(?:\.0{,3})?))? # Optional "q=1.00", "q=0.8"
|
||||
(?:\s*,\s*|$) # Multiple accepts per header.
|
||||
''', re.VERBOSE)
|
||||
|
||||
language_code_re = re.compile(
|
||||
r'^[a-z]{1,8}(?:-[a-z0-9]{1,8})*(?:@[a-z0-9]{1,20})?$',
|
||||
re.IGNORECASE
|
||||
)
|
||||
|
||||
language_code_prefix_re = re.compile(r'^/(\w+([@-]\w+)?)(/|$)')
|
||||
|
||||
|
||||
@receiver(setting_changed)
|
||||
def reset_cache(**kwargs):
|
||||
"""
|
||||
Reset global state when LANGUAGES setting has been changed, as some
|
||||
languages should no longer be accepted.
|
||||
"""
|
||||
if kwargs['setting'] in ('LANGUAGES', 'LANGUAGE_CODE'):
|
||||
check_for_language.cache_clear()
|
||||
get_languages.cache_clear()
|
||||
get_supported_language_variant.cache_clear()
|
||||
|
||||
|
||||
class TranslationCatalog:
|
||||
"""
|
||||
Simulate a dict for DjangoTranslation._catalog so as multiple catalogs
|
||||
with different plural equations are kept separate.
|
||||
"""
|
||||
def __init__(self, trans=None):
|
||||
self._catalogs = [trans._catalog.copy()] if trans else [{}]
|
||||
self._plurals = [trans.plural] if trans else [lambda n: int(n != 1)]
|
||||
|
||||
def __getitem__(self, key):
|
||||
for cat in self._catalogs:
|
||||
try:
|
||||
return cat[key]
|
||||
except KeyError:
|
||||
pass
|
||||
raise KeyError(key)
|
||||
|
||||
def __setitem__(self, key, value):
|
||||
self._catalogs[0][key] = value
|
||||
|
||||
def __contains__(self, key):
|
||||
return any(key in cat for cat in self._catalogs)
|
||||
|
||||
def items(self):
|
||||
for cat in self._catalogs:
|
||||
yield from cat.items()
|
||||
|
||||
def keys(self):
|
||||
for cat in self._catalogs:
|
||||
yield from cat.keys()
|
||||
|
||||
def update(self, trans):
|
||||
# Merge if plural function is the same, else prepend.
|
||||
for cat, plural in zip(self._catalogs, self._plurals):
|
||||
if trans.plural.__code__ == plural.__code__:
|
||||
cat.update(trans._catalog)
|
||||
break
|
||||
else:
|
||||
self._catalogs.insert(0, trans._catalog)
|
||||
self._plurals.insert(0, trans.plural)
|
||||
|
||||
def get(self, key, default=None):
|
||||
missing = object()
|
||||
for cat in self._catalogs:
|
||||
result = cat.get(key, missing)
|
||||
if result is not missing:
|
||||
return result
|
||||
return default
|
||||
|
||||
def plural(self, msgid, num):
|
||||
for cat, plural in zip(self._catalogs, self._plurals):
|
||||
tmsg = cat.get((msgid, plural(num)))
|
||||
if tmsg is not None:
|
||||
return tmsg
|
||||
raise KeyError
|
||||
|
||||
|
||||
class DjangoTranslation(gettext_module.GNUTranslations):
|
||||
"""
|
||||
Set up the GNUTranslations context with regard to output charset.
|
||||
|
||||
This translation object will be constructed out of multiple GNUTranslations
|
||||
objects by merging their catalogs. It will construct an object for the
|
||||
requested language and add a fallback to the default language, if it's
|
||||
different from the requested language.
|
||||
"""
|
||||
domain = 'django'
|
||||
|
||||
def __init__(self, language, domain=None, localedirs=None):
|
||||
"""Create a GNUTranslations() using many locale directories"""
|
||||
gettext_module.GNUTranslations.__init__(self)
|
||||
if domain is not None:
|
||||
self.domain = domain
|
||||
|
||||
self.__language = language
|
||||
self.__to_language = to_language(language)
|
||||
self.__locale = to_locale(language)
|
||||
self._catalog = None
|
||||
# If a language doesn't have a catalog, use the Germanic default for
|
||||
# pluralization: anything except one is pluralized.
|
||||
self.plural = lambda n: int(n != 1)
|
||||
|
||||
if self.domain == 'django':
|
||||
if localedirs is not None:
|
||||
# A module-level cache is used for caching 'django' translations
|
||||
warnings.warn("localedirs is ignored when domain is 'django'.", RuntimeWarning)
|
||||
localedirs = None
|
||||
self._init_translation_catalog()
|
||||
|
||||
if localedirs:
|
||||
for localedir in localedirs:
|
||||
translation = self._new_gnu_trans(localedir)
|
||||
self.merge(translation)
|
||||
else:
|
||||
self._add_installed_apps_translations()
|
||||
|
||||
self._add_local_translations()
|
||||
if self.__language == settings.LANGUAGE_CODE and self.domain == 'django' and self._catalog is None:
|
||||
# default lang should have at least one translation file available.
|
||||
raise OSError('No translation files found for default language %s.' % settings.LANGUAGE_CODE)
|
||||
self._add_fallback(localedirs)
|
||||
if self._catalog is None:
|
||||
# No catalogs found for this language, set an empty catalog.
|
||||
self._catalog = TranslationCatalog()
|
||||
|
||||
def __repr__(self):
|
||||
return "<DjangoTranslation lang:%s>" % self.__language
|
||||
|
||||
def _new_gnu_trans(self, localedir, use_null_fallback=True):
|
||||
"""
|
||||
Return a mergeable gettext.GNUTranslations instance.
|
||||
|
||||
A convenience wrapper. By default gettext uses 'fallback=False'.
|
||||
Using param `use_null_fallback` to avoid confusion with any other
|
||||
references to 'fallback'.
|
||||
"""
|
||||
return gettext_module.translation(
|
||||
domain=self.domain,
|
||||
localedir=localedir,
|
||||
languages=[self.__locale],
|
||||
fallback=use_null_fallback,
|
||||
)
|
||||
|
||||
def _init_translation_catalog(self):
|
||||
"""Create a base catalog using global django translations."""
|
||||
settingsfile = sys.modules[settings.__module__].__file__
|
||||
localedir = os.path.join(os.path.dirname(settingsfile), 'locale')
|
||||
translation = self._new_gnu_trans(localedir)
|
||||
self.merge(translation)
|
||||
|
||||
def _add_installed_apps_translations(self):
|
||||
"""Merge translations from each installed app."""
|
||||
try:
|
||||
app_configs = reversed(list(apps.get_app_configs()))
|
||||
except AppRegistryNotReady:
|
||||
raise AppRegistryNotReady(
|
||||
"The translation infrastructure cannot be initialized before the "
|
||||
"apps registry is ready. Check that you don't make non-lazy "
|
||||
"gettext calls at import time.")
|
||||
for app_config in app_configs:
|
||||
localedir = os.path.join(app_config.path, 'locale')
|
||||
if os.path.exists(localedir):
|
||||
translation = self._new_gnu_trans(localedir)
|
||||
self.merge(translation)
|
||||
|
||||
def _add_local_translations(self):
|
||||
"""Merge translations defined in LOCALE_PATHS."""
|
||||
for localedir in reversed(settings.LOCALE_PATHS):
|
||||
translation = self._new_gnu_trans(localedir)
|
||||
self.merge(translation)
|
||||
|
||||
def _add_fallback(self, localedirs=None):
|
||||
"""Set the GNUTranslations() fallback with the default language."""
|
||||
# Don't set a fallback for the default language or any English variant
|
||||
# (as it's empty, so it'll ALWAYS fall back to the default language)
|
||||
if self.__language == settings.LANGUAGE_CODE or self.__language.startswith('en'):
|
||||
return
|
||||
if self.domain == 'django':
|
||||
# Get from cache
|
||||
default_translation = translation(settings.LANGUAGE_CODE)
|
||||
else:
|
||||
default_translation = DjangoTranslation(
|
||||
settings.LANGUAGE_CODE, domain=self.domain, localedirs=localedirs
|
||||
)
|
||||
self.add_fallback(default_translation)
|
||||
|
||||
def merge(self, other):
|
||||
"""Merge another translation into this catalog."""
|
||||
if not getattr(other, '_catalog', None):
|
||||
return # NullTranslations() has no _catalog
|
||||
if self._catalog is None:
|
||||
# Take plural and _info from first catalog found (generally Django's).
|
||||
self.plural = other.plural
|
||||
self._info = other._info.copy()
|
||||
self._catalog = TranslationCatalog(other)
|
||||
else:
|
||||
self._catalog.update(other)
|
||||
if other._fallback:
|
||||
self.add_fallback(other._fallback)
|
||||
|
||||
def language(self):
|
||||
"""Return the translation language."""
|
||||
return self.__language
|
||||
|
||||
def to_language(self):
|
||||
"""Return the translation language name."""
|
||||
return self.__to_language
|
||||
|
||||
def ngettext(self, msgid1, msgid2, n):
|
||||
try:
|
||||
tmsg = self._catalog.plural(msgid1, n)
|
||||
except KeyError:
|
||||
if self._fallback:
|
||||
return self._fallback.ngettext(msgid1, msgid2, n)
|
||||
if n == 1:
|
||||
tmsg = msgid1
|
||||
else:
|
||||
tmsg = msgid2
|
||||
return tmsg
|
||||
|
||||
|
||||
def translation(language):
|
||||
"""
|
||||
Return a translation object in the default 'django' domain.
|
||||
"""
|
||||
global _translations
|
||||
if language not in _translations:
|
||||
_translations[language] = DjangoTranslation(language)
|
||||
return _translations[language]
|
||||
|
||||
|
||||
def activate(language):
|
||||
"""
|
||||
Fetch the translation object for a given language and install it as the
|
||||
current translation object for the current thread.
|
||||
"""
|
||||
if not language:
|
||||
return
|
||||
_active.value = translation(language)
|
||||
|
||||
|
||||
def deactivate():
|
||||
"""
|
||||
Uninstall the active translation object so that further _() calls resolve
|
||||
to the default translation object.
|
||||
"""
|
||||
if hasattr(_active, "value"):
|
||||
del _active.value
|
||||
|
||||
|
||||
def deactivate_all():
|
||||
"""
|
||||
Make the active translation object a NullTranslations() instance. This is
|
||||
useful when we want delayed translations to appear as the original string
|
||||
for some reason.
|
||||
"""
|
||||
_active.value = gettext_module.NullTranslations()
|
||||
_active.value.to_language = lambda *args: None
|
||||
|
||||
|
||||
def get_language():
|
||||
"""Return the currently selected language."""
|
||||
t = getattr(_active, "value", None)
|
||||
if t is not None:
|
||||
try:
|
||||
return t.to_language()
|
||||
except AttributeError:
|
||||
pass
|
||||
# If we don't have a real translation object, assume it's the default language.
|
||||
return settings.LANGUAGE_CODE
|
||||
|
||||
|
||||
def get_language_bidi():
|
||||
"""
|
||||
Return selected language's BiDi layout.
|
||||
|
||||
* False = left-to-right layout
|
||||
* True = right-to-left layout
|
||||
"""
|
||||
lang = get_language()
|
||||
if lang is None:
|
||||
return False
|
||||
else:
|
||||
base_lang = get_language().split('-')[0]
|
||||
return base_lang in settings.LANGUAGES_BIDI
|
||||
|
||||
|
||||
def catalog():
|
||||
"""
|
||||
Return the current active catalog for further processing.
|
||||
This can be used if you need to modify the catalog or want to access the
|
||||
whole message catalog instead of just translating one string.
|
||||
"""
|
||||
global _default
|
||||
|
||||
t = getattr(_active, "value", None)
|
||||
if t is not None:
|
||||
return t
|
||||
if _default is None:
|
||||
_default = translation(settings.LANGUAGE_CODE)
|
||||
return _default
|
||||
|
||||
|
||||
def gettext(message):
|
||||
"""
|
||||
Translate the 'message' string. It uses the current thread to find the
|
||||
translation object to use. If no current translation is activated, the
|
||||
message will be run through the default translation object.
|
||||
"""
|
||||
global _default
|
||||
|
||||
eol_message = message.replace('\r\n', '\n').replace('\r', '\n')
|
||||
|
||||
if eol_message:
|
||||
_default = _default or translation(settings.LANGUAGE_CODE)
|
||||
translation_object = getattr(_active, "value", _default)
|
||||
|
||||
result = translation_object.gettext(eol_message)
|
||||
else:
|
||||
# Return an empty value of the corresponding type if an empty message
|
||||
# is given, instead of metadata, which is the default gettext behavior.
|
||||
result = type(message)('')
|
||||
|
||||
if isinstance(message, SafeData):
|
||||
return mark_safe(result)
|
||||
|
||||
return result
|
||||
|
||||
|
||||
def pgettext(context, message):
|
||||
msg_with_ctxt = "%s%s%s" % (context, CONTEXT_SEPARATOR, message)
|
||||
result = gettext(msg_with_ctxt)
|
||||
if CONTEXT_SEPARATOR in result:
|
||||
# Translation not found
|
||||
result = message
|
||||
elif isinstance(message, SafeData):
|
||||
result = mark_safe(result)
|
||||
return result
|
||||
|
||||
|
||||
def gettext_noop(message):
|
||||
"""
|
||||
Mark strings for translation but don't translate them now. This can be
|
||||
used to store strings in global variables that should stay in the base
|
||||
language (because they might be used externally) and will be translated
|
||||
later.
|
||||
"""
|
||||
return message
|
||||
|
||||
|
||||
def do_ntranslate(singular, plural, number, translation_function):
|
||||
global _default
|
||||
|
||||
t = getattr(_active, "value", None)
|
||||
if t is not None:
|
||||
return getattr(t, translation_function)(singular, plural, number)
|
||||
if _default is None:
|
||||
_default = translation(settings.LANGUAGE_CODE)
|
||||
return getattr(_default, translation_function)(singular, plural, number)
|
||||
|
||||
|
||||
def ngettext(singular, plural, number):
|
||||
"""
|
||||
Return a string of the translation of either the singular or plural,
|
||||
based on the number.
|
||||
"""
|
||||
return do_ntranslate(singular, plural, number, 'ngettext')
|
||||
|
||||
|
||||
def npgettext(context, singular, plural, number):
|
||||
msgs_with_ctxt = ("%s%s%s" % (context, CONTEXT_SEPARATOR, singular),
|
||||
"%s%s%s" % (context, CONTEXT_SEPARATOR, plural),
|
||||
number)
|
||||
result = ngettext(*msgs_with_ctxt)
|
||||
if CONTEXT_SEPARATOR in result:
|
||||
# Translation not found
|
||||
result = ngettext(singular, plural, number)
|
||||
return result
|
||||
|
||||
|
||||
def all_locale_paths():
|
||||
"""
|
||||
Return a list of paths to user-provides languages files.
|
||||
"""
|
||||
globalpath = os.path.join(
|
||||
os.path.dirname(sys.modules[settings.__module__].__file__), 'locale')
|
||||
app_paths = []
|
||||
for app_config in apps.get_app_configs():
|
||||
locale_path = os.path.join(app_config.path, 'locale')
|
||||
if os.path.exists(locale_path):
|
||||
app_paths.append(locale_path)
|
||||
return [globalpath, *settings.LOCALE_PATHS, *app_paths]
|
||||
|
||||
|
||||
@functools.lru_cache(maxsize=1000)
|
||||
def check_for_language(lang_code):
|
||||
"""
|
||||
Check whether there is a global language file for the given language
|
||||
code. This is used to decide whether a user-provided language is
|
||||
available.
|
||||
|
||||
lru_cache should have a maxsize to prevent from memory exhaustion attacks,
|
||||
as the provided language codes are taken from the HTTP request. See also
|
||||
<https://www.djangoproject.com/weblog/2007/oct/26/security-fix/>.
|
||||
"""
|
||||
# First, a quick check to make sure lang_code is well-formed (#21458)
|
||||
if lang_code is None or not language_code_re.search(lang_code):
|
||||
return False
|
||||
return any(
|
||||
gettext_module.find('django', path, [to_locale(lang_code)]) is not None
|
||||
for path in all_locale_paths()
|
||||
)
|
||||
|
||||
|
||||
@functools.lru_cache()
|
||||
def get_languages():
|
||||
"""
|
||||
Cache of settings.LANGUAGES in a dictionary for easy lookups by key.
|
||||
"""
|
||||
return dict(settings.LANGUAGES)
|
||||
|
||||
|
||||
@functools.lru_cache(maxsize=1000)
|
||||
def get_supported_language_variant(lang_code, strict=False):
|
||||
"""
|
||||
Return the language code that's listed in supported languages, possibly
|
||||
selecting a more generic variant. Raise LookupError if nothing is found.
|
||||
|
||||
If `strict` is False (the default), look for a country-specific variant
|
||||
when neither the language code nor its generic variant is found.
|
||||
|
||||
lru_cache should have a maxsize to prevent from memory exhaustion attacks,
|
||||
as the provided language codes are taken from the HTTP request. See also
|
||||
<https://www.djangoproject.com/weblog/2007/oct/26/security-fix/>.
|
||||
"""
|
||||
if lang_code:
|
||||
# If 'fr-ca' is not supported, try special fallback or language-only 'fr'.
|
||||
possible_lang_codes = [lang_code]
|
||||
try:
|
||||
possible_lang_codes.extend(LANG_INFO[lang_code]['fallback'])
|
||||
except KeyError:
|
||||
pass
|
||||
generic_lang_code = lang_code.split('-')[0]
|
||||
possible_lang_codes.append(generic_lang_code)
|
||||
supported_lang_codes = get_languages()
|
||||
|
||||
for code in possible_lang_codes:
|
||||
if code in supported_lang_codes and check_for_language(code):
|
||||
return code
|
||||
if not strict:
|
||||
# if fr-fr is not supported, try fr-ca.
|
||||
for supported_code in supported_lang_codes:
|
||||
if supported_code.startswith(generic_lang_code + '-'):
|
||||
return supported_code
|
||||
raise LookupError(lang_code)
|
||||
|
||||
|
||||
def get_language_from_path(path, strict=False):
|
||||
"""
|
||||
Return the language code if there's a valid language code found in `path`.
|
||||
|
||||
If `strict` is False (the default), look for a country-specific variant
|
||||
when neither the language code nor its generic variant is found.
|
||||
"""
|
||||
regex_match = language_code_prefix_re.match(path)
|
||||
if not regex_match:
|
||||
return None
|
||||
lang_code = regex_match.group(1)
|
||||
try:
|
||||
return get_supported_language_variant(lang_code, strict=strict)
|
||||
except LookupError:
|
||||
return None
|
||||
|
||||
|
||||
def get_language_from_request(request, check_path=False):
|
||||
"""
|
||||
Analyze the request to find what language the user wants the system to
|
||||
show. Only languages listed in settings.LANGUAGES are taken into account.
|
||||
If the user requests a sublanguage where we have a main language, we send
|
||||
out the main language.
|
||||
|
||||
If check_path is True, the URL path prefix will be checked for a language
|
||||
code, otherwise this is skipped for backwards compatibility.
|
||||
"""
|
||||
if check_path:
|
||||
lang_code = get_language_from_path(request.path_info)
|
||||
if lang_code is not None:
|
||||
return lang_code
|
||||
|
||||
lang_code = request.COOKIES.get(settings.LANGUAGE_COOKIE_NAME)
|
||||
if lang_code is not None and lang_code in get_languages() and check_for_language(lang_code):
|
||||
return lang_code
|
||||
|
||||
try:
|
||||
return get_supported_language_variant(lang_code)
|
||||
except LookupError:
|
||||
pass
|
||||
|
||||
accept = request.META.get('HTTP_ACCEPT_LANGUAGE', '')
|
||||
for accept_lang, unused in parse_accept_lang_header(accept):
|
||||
if accept_lang == '*':
|
||||
break
|
||||
|
||||
if not language_code_re.search(accept_lang):
|
||||
continue
|
||||
|
||||
try:
|
||||
return get_supported_language_variant(accept_lang)
|
||||
except LookupError:
|
||||
continue
|
||||
|
||||
try:
|
||||
return get_supported_language_variant(settings.LANGUAGE_CODE)
|
||||
except LookupError:
|
||||
return settings.LANGUAGE_CODE
|
||||
|
||||
|
||||
@functools.lru_cache(maxsize=1000)
|
||||
def parse_accept_lang_header(lang_string):
|
||||
"""
|
||||
Parse the lang_string, which is the body of an HTTP Accept-Language
|
||||
header, and return a tuple of (lang, q-value), ordered by 'q' values.
|
||||
|
||||
Return an empty tuple if there are any format errors in lang_string.
|
||||
"""
|
||||
result = []
|
||||
pieces = accept_language_re.split(lang_string.lower())
|
||||
if pieces[-1]:
|
||||
return ()
|
||||
for i in range(0, len(pieces) - 1, 3):
|
||||
first, lang, priority = pieces[i:i + 3]
|
||||
if first:
|
||||
return ()
|
||||
if priority:
|
||||
priority = float(priority)
|
||||
else:
|
||||
priority = 1.0
|
||||
result.append((lang, priority))
|
||||
result.sort(key=lambda k: k[1], reverse=True)
|
||||
return tuple(result)
|
||||
124
venv/lib/python3.8/site-packages/django/utils/tree.py
Normal file
124
venv/lib/python3.8/site-packages/django/utils/tree.py
Normal file
@@ -0,0 +1,124 @@
|
||||
"""
|
||||
A class for storing a tree graph. Primarily used for filter constructs in the
|
||||
ORM.
|
||||
"""
|
||||
|
||||
import copy
|
||||
|
||||
from django.utils.hashable import make_hashable
|
||||
|
||||
|
||||
class Node:
|
||||
"""
|
||||
A single internal node in the tree graph. A Node should be viewed as a
|
||||
connection (the root) with the children being either leaf nodes or other
|
||||
Node instances.
|
||||
"""
|
||||
# Standard connector type. Clients usually won't use this at all and
|
||||
# subclasses will usually override the value.
|
||||
default = 'DEFAULT'
|
||||
|
||||
def __init__(self, children=None, connector=None, negated=False):
|
||||
"""Construct a new Node. If no connector is given, use the default."""
|
||||
self.children = children[:] if children else []
|
||||
self.connector = connector or self.default
|
||||
self.negated = negated
|
||||
|
||||
# Required because django.db.models.query_utils.Q. Q. __init__() is
|
||||
# problematic, but it is a natural Node subclass in all other respects.
|
||||
@classmethod
|
||||
def _new_instance(cls, children=None, connector=None, negated=False):
|
||||
"""
|
||||
Create a new instance of this class when new Nodes (or subclasses) are
|
||||
needed in the internal code in this class. Normally, it just shadows
|
||||
__init__(). However, subclasses with an __init__ signature that aren't
|
||||
an extension of Node.__init__ might need to implement this method to
|
||||
allow a Node to create a new instance of them (if they have any extra
|
||||
setting up to do).
|
||||
"""
|
||||
obj = Node(children, connector, negated)
|
||||
obj.__class__ = cls
|
||||
return obj
|
||||
|
||||
def __str__(self):
|
||||
template = '(NOT (%s: %s))' if self.negated else '(%s: %s)'
|
||||
return template % (self.connector, ', '.join(str(c) for c in self.children))
|
||||
|
||||
def __repr__(self):
|
||||
return "<%s: %s>" % (self.__class__.__name__, self)
|
||||
|
||||
def __deepcopy__(self, memodict):
|
||||
obj = Node(connector=self.connector, negated=self.negated)
|
||||
obj.__class__ = self.__class__
|
||||
obj.children = copy.deepcopy(self.children, memodict)
|
||||
return obj
|
||||
|
||||
def __len__(self):
|
||||
"""Return the number of children this node has."""
|
||||
return len(self.children)
|
||||
|
||||
def __bool__(self):
|
||||
"""Return whether or not this node has children."""
|
||||
return bool(self.children)
|
||||
|
||||
def __contains__(self, other):
|
||||
"""Return True if 'other' is a direct child of this instance."""
|
||||
return other in self.children
|
||||
|
||||
def __eq__(self, other):
|
||||
return (
|
||||
self.__class__ == other.__class__ and
|
||||
(self.connector, self.negated) == (other.connector, other.negated) and
|
||||
self.children == other.children
|
||||
)
|
||||
|
||||
def __hash__(self):
|
||||
return hash((self.__class__, self.connector, self.negated, *make_hashable(self.children)))
|
||||
|
||||
def add(self, data, conn_type, squash=True):
|
||||
"""
|
||||
Combine this tree and the data represented by data using the
|
||||
connector conn_type. The combine is done by squashing the node other
|
||||
away if possible.
|
||||
|
||||
This tree (self) will never be pushed to a child node of the
|
||||
combined tree, nor will the connector or negated properties change.
|
||||
|
||||
Return a node which can be used in place of data regardless if the
|
||||
node other got squashed or not.
|
||||
|
||||
If `squash` is False the data is prepared and added as a child to
|
||||
this tree without further logic.
|
||||
"""
|
||||
if data in self.children:
|
||||
return data
|
||||
if not squash:
|
||||
self.children.append(data)
|
||||
return data
|
||||
if self.connector == conn_type:
|
||||
# We can reuse self.children to append or squash the node other.
|
||||
if (isinstance(data, Node) and not data.negated and
|
||||
(data.connector == conn_type or len(data) == 1)):
|
||||
# We can squash the other node's children directly into this
|
||||
# node. We are just doing (AB)(CD) == (ABCD) here, with the
|
||||
# addition that if the length of the other node is 1 the
|
||||
# connector doesn't matter. However, for the len(self) == 1
|
||||
# case we don't want to do the squashing, as it would alter
|
||||
# self.connector.
|
||||
self.children.extend(data.children)
|
||||
return self
|
||||
else:
|
||||
# We could use perhaps additional logic here to see if some
|
||||
# children could be used for pushdown here.
|
||||
self.children.append(data)
|
||||
return data
|
||||
else:
|
||||
obj = self._new_instance(self.children, self.connector,
|
||||
self.negated)
|
||||
self.connector = conn_type
|
||||
self.children = [obj, data]
|
||||
return data
|
||||
|
||||
def negate(self):
|
||||
"""Negate the sense of the root connector."""
|
||||
self.negated = not self.negated
|
||||
104
venv/lib/python3.8/site-packages/django/utils/version.py
Normal file
104
venv/lib/python3.8/site-packages/django/utils/version.py
Normal file
@@ -0,0 +1,104 @@
|
||||
import datetime
|
||||
import functools
|
||||
import os
|
||||
import subprocess
|
||||
import sys
|
||||
from distutils.version import LooseVersion
|
||||
|
||||
# Private, stable API for detecting the Python version. PYXY means "Python X.Y
|
||||
# or later". So that third-party apps can use these values, each constant
|
||||
# should remain as long as the oldest supported Django version supports that
|
||||
# Python version.
|
||||
PY36 = sys.version_info >= (3, 6)
|
||||
PY37 = sys.version_info >= (3, 7)
|
||||
PY38 = sys.version_info >= (3, 8)
|
||||
PY39 = sys.version_info >= (3, 9)
|
||||
|
||||
|
||||
def get_version(version=None):
|
||||
"""Return a PEP 440-compliant version number from VERSION."""
|
||||
version = get_complete_version(version)
|
||||
|
||||
# Now build the two parts of the version number:
|
||||
# main = X.Y[.Z]
|
||||
# sub = .devN - for pre-alpha releases
|
||||
# | {a|b|rc}N - for alpha, beta, and rc releases
|
||||
|
||||
main = get_main_version(version)
|
||||
|
||||
sub = ''
|
||||
if version[3] == 'alpha' and version[4] == 0:
|
||||
git_changeset = get_git_changeset()
|
||||
if git_changeset:
|
||||
sub = '.dev%s' % git_changeset
|
||||
|
||||
elif version[3] != 'final':
|
||||
mapping = {'alpha': 'a', 'beta': 'b', 'rc': 'rc'}
|
||||
sub = mapping[version[3]] + str(version[4])
|
||||
|
||||
return main + sub
|
||||
|
||||
|
||||
def get_main_version(version=None):
|
||||
"""Return main version (X.Y[.Z]) from VERSION."""
|
||||
version = get_complete_version(version)
|
||||
parts = 2 if version[2] == 0 else 3
|
||||
return '.'.join(str(x) for x in version[:parts])
|
||||
|
||||
|
||||
def get_complete_version(version=None):
|
||||
"""
|
||||
Return a tuple of the django version. If version argument is non-empty,
|
||||
check for correctness of the tuple provided.
|
||||
"""
|
||||
if version is None:
|
||||
from django import VERSION as version
|
||||
else:
|
||||
assert len(version) == 5
|
||||
assert version[3] in ('alpha', 'beta', 'rc', 'final')
|
||||
|
||||
return version
|
||||
|
||||
|
||||
def get_docs_version(version=None):
|
||||
version = get_complete_version(version)
|
||||
if version[3] != 'final':
|
||||
return 'dev'
|
||||
else:
|
||||
return '%d.%d' % version[:2]
|
||||
|
||||
|
||||
@functools.lru_cache()
|
||||
def get_git_changeset():
|
||||
"""Return a numeric identifier of the latest git changeset.
|
||||
|
||||
The result is the UTC timestamp of the changeset in YYYYMMDDHHMMSS format.
|
||||
This value isn't guaranteed to be unique, but collisions are very unlikely,
|
||||
so it's sufficient for generating the development version numbers.
|
||||
"""
|
||||
repo_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
|
||||
git_log = subprocess.run(
|
||||
['git', 'log', '--pretty=format:%ct', '--quiet', '-1', 'HEAD'],
|
||||
stdout=subprocess.PIPE, stderr=subprocess.PIPE,
|
||||
shell=True, cwd=repo_dir, universal_newlines=True,
|
||||
)
|
||||
timestamp = git_log.stdout
|
||||
try:
|
||||
timestamp = datetime.datetime.utcfromtimestamp(int(timestamp))
|
||||
except ValueError:
|
||||
return None
|
||||
return timestamp.strftime('%Y%m%d%H%M%S')
|
||||
|
||||
|
||||
def get_version_tuple(version):
|
||||
"""
|
||||
Return a tuple of version numbers (e.g. (1, 2, 3)) from the version
|
||||
string (e.g. '1.2.3').
|
||||
"""
|
||||
loose_version = LooseVersion(version)
|
||||
version_numbers = []
|
||||
for item in loose_version.version:
|
||||
if not isinstance(item, int):
|
||||
break
|
||||
version_numbers.append(item)
|
||||
return tuple(version_numbers)
|
||||
33
venv/lib/python3.8/site-packages/django/utils/xmlutils.py
Normal file
33
venv/lib/python3.8/site-packages/django/utils/xmlutils.py
Normal file
@@ -0,0 +1,33 @@
|
||||
"""
|
||||
Utilities for XML generation/parsing.
|
||||
"""
|
||||
|
||||
import re
|
||||
from xml.sax.saxutils import XMLGenerator
|
||||
|
||||
|
||||
class UnserializableContentError(ValueError):
|
||||
pass
|
||||
|
||||
|
||||
class SimplerXMLGenerator(XMLGenerator):
|
||||
def addQuickElement(self, name, contents=None, attrs=None):
|
||||
"Convenience method for adding an element with no children"
|
||||
if attrs is None:
|
||||
attrs = {}
|
||||
self.startElement(name, attrs)
|
||||
if contents is not None:
|
||||
self.characters(contents)
|
||||
self.endElement(name)
|
||||
|
||||
def characters(self, content):
|
||||
if content and re.search(r'[\x00-\x08\x0B-\x0C\x0E-\x1F]', content):
|
||||
# Fail loudly when content has control chars (unsupported in XML 1.0)
|
||||
# See https://www.w3.org/International/questions/qa-controls
|
||||
raise UnserializableContentError("Control characters are not supported in XML 1.0")
|
||||
XMLGenerator.characters(self, content)
|
||||
|
||||
def startElement(self, name, attrs):
|
||||
# Sort attrs for a deterministic output.
|
||||
sorted_attrs = dict(sorted(attrs.items())) if attrs else attrs
|
||||
super().startElement(name, sorted_attrs)
|
||||
Reference in New Issue
Block a user