diff --git a/sideboard/__init__.py b/sideboard/__init__.py index 01d9146..ef3ee86 100644 --- a/sideboard/__init__.py +++ b/sideboard/__init__.py @@ -8,9 +8,6 @@ import sideboard.server from sideboard.internal.imports import _discover_plugins -from sideboard.internal.logging import _configure_logging -import sideboard.run_mainloop if 'SIDEBOARD_MODULE_TESTING' not in os.environ: _discover_plugins() - _configure_logging() diff --git a/sideboard/config.py b/sideboard/config.py index 21921d5..5d47ba4 100755 --- a/sideboard/config.py +++ b/sideboard/config.py @@ -12,10 +12,6 @@ from validate import Validator -class ConfigurationError(RuntimeError): - pass - - def get_module_and_root_dirs(requesting_file_path, is_plugin): """ Returns the "module_root" and "root" directories for the given file path. @@ -185,7 +181,7 @@ def parse_config(requesting_file_path, is_plugin=True): unlink(temp_name) if validation is not True: - raise ConfigurationError('configuration validation error(s) (): {!r}'.format( + raise RuntimeError('configuration validation error(s) (): {!r}'.format( configobj.flatten_errors(config, validation)) ) diff --git a/sideboard/configspec.ini b/sideboard/configspec.ini index 9d5d439..b357730 100644 --- a/sideboard/configspec.ini +++ b/sideboard/configspec.ini @@ -32,13 +32,6 @@ ssl_version = string(default="PROTOCOL_TLSv1") # in with any username using this password. debug_password = string(default="testpassword") -# Sideboard has numerous background threads which wait on sideboard.lib.stopped -# to either sleep or bail immediately on shutdown. Since these threads wait in -# a loop, we don't want to set an interval too small or we'll eat a lot of CPU -# while doing absolutely nothing. A hard-coded value of 1 second would probably -# be fine for all workloads, but we've made it configurable just in case. -thread_wait_interval = float(default=1) - # Plugins can register different authenticators, since different applications may # have different ideas about what it means to be "logged in". The default # authenticator is mainly used for the /ws and /json RPC endpoints, so this @@ -87,8 +80,6 @@ server.socket_host = string(default="127.0.0.1") server.socket_port = integer(default=80) server.thread_pool = integer(default=10) -tools.reset_threadlocal.on = boolean(default=True) - tools.sessions.on = boolean(default=True) tools.sessions.path = string(default="/") tools.sessions.timeout = integer(default=60) diff --git a/sideboard/internal/autolog.py b/sideboard/internal/autolog.py deleted file mode 100755 index 7508f14..0000000 --- a/sideboard/internal/autolog.py +++ /dev/null @@ -1,243 +0,0 @@ -from __future__ import absolute_import, unicode_literals -import sys -import logging -import inspect - - -class EagerFormattingAdapter(logging.LoggerAdapter): - """ - A `logging.LoggerAdapter` that add unterpolation support but performs the - evaluation immediately if the appropriate loglevel is set. - """ - - def __init__(self, logger, extra=None): - """ - Initialize the adapter with a logger and a dict-like object which - provides contextual information. This constructor signature allows - easy stacking of LoggerAdapters, if so desired. - - You can effectively pass keyword arguments as shown in the - following example: - - adapter = LoggerAdapter(someLogger, dict(p1=v1, p2="v2")) - """ - self.logger = logger - self.extra = extra - - def _eagerFormat(self, msg, level, args): - """ - Eagerly apply log formatting if the appropriate level is enabled. 
- - Otherwise we just drop the log message (and return a string indicating - that it was suppreseed). - """ - if self.isEnabledFor(level): - # Do the string formatting immediately. - if args: - return self._getUnterpolatedMessage(msg, args) - else: - return msg - else: - # Otherwise, just drop the message completely to avoid anything going - # wrong in the future. This text shoudl clue one in to what's going - # on in the bizarre edge case where this ever does show up. - return '(log message suppressed due to insufficient log level)' - - def _getUnterpolatedMessage(self, msg, args): - """ - Returns the formatted string, will first attempt str.format and will - fallback to msg % args as it was originally. - - This is lifted almost wholesale from logging_unterpolation. - """ - original_msg = msg - if isinstance(args, dict): - # special case handing for unpatched logging supporting - # statements like: - # logging.debug("a %(a)d b %(b)s", {'a':1, 'b':2}) - args = (args,) - - try: - msg = msg.format(*args) - except UnicodeEncodeError: - # This is most likely due to formatting a non-ascii string argument - # into a bytestring, which the %-operator automatically handles - # by casting the left side (the "msg" variable) in this context - # to unicode. So we'll do that here - - if sys.version_info >= (3, 0,): - # this is most likely unnecessary on python 3, but it's here - # for completeness, in the case of someone manually creating - # a bytestring - unicode_type = str - else: - unicode_type = unicode - - # handle the attempt to print utf-8 encoded data, similar to - # %-interpolation's handling of unicode formatting non-ascii - # strings - msg = unicode_type(msg).format(*args) - - except ValueError: - # From PEP-3101, value errors are of the type raised by the format - # method itself, so see if we should fall back to original - # formatting since there was an issue - if '%' in msg: - msg = msg % args - else: - # we should NOT fall back, since there's no possible string - # interpolation happening and we want a meaningful error - # message - raise - - if msg == original_msg and '%' in msg: - # there must have been no string formatting methods - # used, given the presence of args without a change in the msg - # fall back to original formatting, including the special case - # for one passed dictionary argument - msg = msg % args - - return msg - - def debug(self, msg, *args, **kwargs): - """ - Delegate a debug call to the underlying logger, after adding - contextual information from this adapter instance. - """ - self.log(logging.DEBUG, msg, *args, **kwargs) - - def info(self, msg, *args, **kwargs): - """ - Delegate an info call to the underlying logger, after adding - contextual information from this adapter instance. - """ - self.log(logging.INFO, msg, *args, **kwargs) - - def warning(self, msg, *args, **kwargs): - """ - Delegate a warning call to the underlying logger, after adding - contextual information from this adapter instance. - """ - self.log(logging.WARNING, msg, *args, **kwargs) - - def warn(self, msg, *args, **kwargs): - """ - Delegate a warning call to the underlying logger, after adding - contextual information from this adapter instance. - """ - self.log(logging.WARNING, msg, *args, **kwargs) - - def error(self, msg, *args, **kwargs): - """ - Delegate an error call to the underlying logger, after adding - contextual information from this adapter instance. 
- """ - self.log(logging.ERROR, msg, *args, **kwargs) - - def exception(self, msg, *args, **kwargs): - """ - Delegate an exception call to the underlying logger, after adding - contextual information from this adapter instance. - """ - kwargs["exc_info"] = 1 - self.log(logging.ERROR, msg, *args, **kwargs) - - def critical(self, msg, *args, **kwargs): - """ - Delegate a critical call to the underlying logger, after adding - contextual information from this adapter instance. - """ - self.log(logging.CRITICAL, msg, *args, **kwargs) - - def log(self, level, msg, *args, **kwargs): - """ - Delegate a log call to the underlying logger, after adding - contextual information from this adapter instance. - """ - msg, kwargs = self.process(msg, kwargs) - # We explicitly do not pass the args into the log method here, since - # they should be "used up" by the eagerFormat method. - self.logger.log(level, self._eagerFormat(msg, level, args), **kwargs) - - -class AutoLogger(object): - """ - A logger proxy object, with all of the methods and attributes of C{Logger}. - - When an attribute (e.g., "debug") is requested, inspects the stack for the - calling module's name, and passes that name to C{logging.getLogger}. - - What this means is that you can instantiate an C{AutoLogger} anywhere, and - when you call it, the log entry shows the module where you called it, not - where it was created. - - C{AutoLogger} also inspects the local variables where it is called, looking - for C{self}. If C{self} exists, its classname is added to the module name. - """ - - def __init__(self, adapter_class=None, adapter_args=None, - adapter_kwargs=None): - if adapter_args is None: - adapter_args = [] - if adapter_kwargs is None: - adapter_kwargs = {} - - self.adapter_class = adapter_class - self.adapter_args = adapter_args - self.adapter_kwargs = adapter_kwargs - - def __getattr__(self, name): - f_locals = inspect.currentframe().f_back.f_locals - if 'self' in f_locals and f_locals['self'] is not None: - other = f_locals['self'] - caller_name = '%s.%s' % (other.__class__.__module__, other.__class__.__name__) - else: - caller_name = inspect.currentframe().f_back.f_globals['__name__'] - logger = logging.getLogger(caller_name) - - if self.adapter_class: - logger = self.adapter_class(logger, *self.adapter_args, - **self.adapter_kwargs) - - return getattr(logger, name) - - -log = AutoLogger() - - -def log_exceptions(fn): - """ A decorator designed to wrap a function and log any exception that method produces. - - The exception will still be raised after being logged. - - Also logs (at the trace level) the arguments to every call. - - Currently this is only designed for module-level functions. Not sure what happens if a method is decorated - with this (since logger is resolved from module name). - """ - - def wrapper(*args, **kwargs): - try: - a = args or [] - a = [str(x)[:255] for x in a] - kw = kwargs or {} - kw = {str(k)[:255]: str(v)[:255] for k, v in kw.items()} - log.debug('Calling %s.%s %r %r' % (fn.__module__, fn.__name__, a, kw)) - return fn(*args, **kwargs) - except Exception as e: - log.error('Error calling function %s: %s' % (fn.__name__, e)) - log.exception(e) - raise - - wrapper.__name__ = fn.__name__ - return wrapper - - -TRACE_LEVEL = 5 -logging.addLevelName(TRACE_LEVEL, "TRACE") - - -def trace(self, message, *args, **kws): - # Yes, logger takes its '*args' as 'args'. 
- self._log(TRACE_LEVEL, message, args, **kws) -logging.Logger.trace = trace diff --git a/sideboard/internal/logging.py b/sideboard/internal/logging.py deleted file mode 100644 index 6dbe03c..0000000 --- a/sideboard/internal/logging.py +++ /dev/null @@ -1,41 +0,0 @@ -from __future__ import unicode_literals, absolute_import -import logging.config - -from sideboard.config import config - - -class IndentMultilinesLogFormatter(logging.Formatter): - """ - Provide a formatter (unused by default) which adds indentation to messages - which are split across multiple lines. - """ - def format(self, record): - s = super(IndentMultilinesLogFormatter, self).format(record) - # indent all lines that start with a newline so they are easier for external log programs to parse - s = s.rstrip('\n').replace('\n', '\n ') - return s - - -def _configure_logging(): - # ConfigObj doesn't support interpolation escaping, so we manually work around it here - formatters = config['formatters'].dict() - for formatter in formatters.values(): - formatter['format'] = formatter['format'].replace('$$', '%') - formatter['datefmt'] = formatter['datefmt'].replace('$$', '%') or None - formatters['indent_multiline'] = { - '()': IndentMultilinesLogFormatter, - 'format': formatters['default']['format'] - } - logging.config.dictConfig({ - 'version': 1, - 'root': { - 'level': config['loggers']['root'], - 'handlers': config['handlers'].dict().keys() - }, - 'loggers': { - name: {'level': level} - for name, level in config['loggers'].items() if name != 'root' - }, - 'handlers': config['handlers'].dict(), - 'formatters': formatters - }) diff --git a/sideboard/lib/__init__.py b/sideboard/lib/__init__.py index 5f31f26..c7e743c 100644 --- a/sideboard/lib/__init__.py +++ b/sideboard/lib/__init__.py @@ -1,16 +1,9 @@ from __future__ import unicode_literals -from sideboard.internal.autolog import log -from sideboard.config import config, ConfigurationError, parse_config -from sideboard.lib._utils import is_listy, listify, serializer, cached_property, request_cached_property, class_property, entry_point, RWGuard -from sideboard.lib._cp import stopped, on_startup, on_shutdown, mainloop, ajax, renders_template, restricted, all_restricted, register_authenticator -from sideboard.lib._threads import threadlocal +from sideboard.config import parse_config, config +from sideboard.lib._utils import serializer, entry_point +import sideboard.lib._redissession -__all__ = ['log', - 'ConfigurationError', 'parse_config', - 'is_listy', 'listify', 'serializer', 'cached_property', 'class_property', 'entry_point', - 'stopped', 'on_startup', 'on_shutdown', 'mainloop', 'ajax', 'renders_template', - 'restricted', 'all_restricted', 'register_authenticator', - 'threadlocal', - 'listify', 'serializer', 'cached_property', 'request_cached_property', 'is_listy', 'entry_point', 'RWGuard'] +__all__ = ['parse_config', 'config', + 'serializer', 'entry_point'] diff --git a/sideboard/lib/_cp.py b/sideboard/lib/_cp.py deleted file mode 100644 index 4b0e9b1..0000000 --- a/sideboard/lib/_cp.py +++ /dev/null @@ -1,212 +0,0 @@ -from __future__ import unicode_literals -import json -from threading import Event -from functools import wraps -from collections import defaultdict - -import cherrypy - -from sideboard.lib._redissession import RedisSession -cherrypy.lib.sessions.RedisSession = RedisSession - -import sideboard.lib -from sideboard.lib import log, config, serializer - -auth_registry = {} -_startup_registry = defaultdict(list) -_shutdown_registry = defaultdict(list) - - -def 
_on_startup(func, priority): - _startup_registry[priority].append(func) - return func - - -def _on_shutdown(func, priority): - _shutdown_registry[priority].append(func) - return func - - -def on_startup(func=None, priority=50): - """ - Register a function to be called when Sideboard starts. Startup functions - have a priority, and the functions are invoked in priority order, where - low-priority-numbered functions are invoked before higher numbers. - - Startup functions may be registered in one of three ways: - - 1) A function can be passed directly, e.g. - on_startup(callback_function) - on_startup(callback_function, priority=25) - - 2) This function can be used as a decorator, e.g. - @on_startup - def callback_function(): - ... - - 3) This function can be used as a decorator with a priority value, e.g. - @on_startup(priority=25) - def callback_function(): - ... - - If instead of running a function when Sideboard starts, you need to run a - function immediately after Sideboard loads your plugin, you may optionally - declare an on_load() function in your plugin's top-level __init__.py - module. If it exists, Sideboard will call on_load() immediately after - loading the plugin, before attempting to load any subsequent plugins. - - """ - if func: - return _on_startup(func, priority) - else: - return lambda func: _on_startup(func, priority) - - -def on_shutdown(func=None, priority=50): - """ - Register a function to be called when Sideboard exits. See the on_startup - function above for how this is used. - """ - if func: - return _on_shutdown(func, priority) - else: - return lambda func: _on_shutdown(func, priority) - - -def _run_startup(): - for priority, functions in sorted(_startup_registry.items()): - for func in functions: - func() - - -def _run_shutdown(): - for priority, functions in sorted(_shutdown_registry.items()): - for func in functions: - try: - func() - except Exception: - log.warning('Ignored exception during shutdown', exc_info=True) - -stopped = Event() -on_startup(stopped.clear, priority=0) -on_shutdown(stopped.set, priority=0) - -cherrypy.engine.subscribe('start', _run_startup, priority=98) -cherrypy.engine.subscribe('stop', _run_shutdown, priority=98) - - -def mainloop(): - """ - This function exists for Sideboard plugins which do not run CherryPy. It - runs all of the functions registered with sideboard.lib.on_startup and then - waits for shutdown, at which point it runs all functions registered with - sideboard.lib.on_shutdown. - """ - _run_startup() - try: - while not stopped.is_set(): - try: - stopped.wait(config['thread_wait_interval']) - except KeyboardInterrupt: - break - finally: - _run_shutdown() - - -def ajax(method): - """ - Decorator for CherryPy page handler methods which sets the Content-Type - to application/json and serializes your function's return value to json. - """ - @wraps(method) - def to_json(self, *args, **kwargs): - cherrypy.response.headers['Content-Type'] = 'application/json' - return json.dumps(method(self, *args, **kwargs), cls=sideboard.lib.serializer) - return to_json - - -def restricted(x): - """ - Decorator for CherryPy page handler methods. This can either be called - to provide an authenticator ident or called directly as a decorator, e.g. - - @restricted - def some_page(self): ... - - is equivalent to - - @restricted(sideboard.lib.config['default_authenticator']) - def some_page(self): ... 
- """ - def make_decorator(ident): - def decorator(func): - @cherrypy.expose - @wraps(func) - def with_checking(*args, **kwargs): - if not auth_registry[ident]['check'](): - raise cherrypy.HTTPRedirect(auth_registry[ident]['login_path']) - else: - return func(*args, **kwargs) - return with_checking - return decorator - - if hasattr(x, '__call__'): - return make_decorator(config['default_authenticator'])(x) - else: - return make_decorator(x) - - -def renders_template(method): - """ - Decorator for CherryPy page handler methods implementing default behaviors: - - if your page handler returns a string, return that un-modified - - if your page handler returns a non-jsonrpc dictionary, render a template - with that dictionary; the function my_page will render my_page.html - """ - @cherrypy.expose - @wraps(method) - def renderer(self, *args, **kwargs): - output = method(self, *args, **kwargs) - if isinstance(output, dict) and output.get('jsonrpc') != '2.0': - return self.env.get_template(method.__name__ + '.html').render(**output) - else: - return output - return renderer - - -# Lifted from Jinja2 docs. See http://jinja.pocoo.org/docs/api/#autoescaping -def _guess_autoescape(template_name): - if template_name is None or '.' not in template_name: - return False - ext = template_name.rsplit('.', 1)[1] - return ext in ('html', 'htm', 'xml') - - -class all_restricted(object): - """Invokes the @restricted decorator on all methods of a class.""" - def __init__(self, ident): - self.ident = ident - assert ident in auth_registry, '{!r} is not a recognized authenticator'.format(ident) - - def __call__(self, klass): - for name, func in list(klass.__dict__.items()): - if hasattr(func, '__call__'): - setattr(klass, name, restricted(self.ident)(func)) - return klass - - -def register_authenticator(ident, login_path, checker): - """ - Register a new authenticator, which consists of three things: - - A string ident, used to identify the authenticator in @restricted calls. - - The path to the login page we should redirect to when not authenticated. - - A function callable with no parameters which returns a truthy value if the - user is logged in and a falsey value if they are not. 
- """ - assert ident not in auth_registry, '{} is already a registered authenticator'.format(ident) - auth_registry[ident] = { - 'check': checker, - 'login_path': login_path - } - -register_authenticator('default', '/login', lambda: 'username' in cherrypy.session) diff --git a/sideboard/lib/_redissession.py b/sideboard/lib/_redissession.py index 548778d..39cb651 100644 --- a/sideboard/lib/_redissession.py +++ b/sideboard/lib/_redissession.py @@ -7,6 +7,7 @@ from cherrypy.lib.sessions import Session import redis +import cherrypy from redis import Sentinel class RedisSession(Session): @@ -85,4 +86,6 @@ def acquire_lock(self): def release_lock(self): """Release the lock on the currently-loaded session data.""" self.locks[self.prefix+self.id].release() - self.locked = False \ No newline at end of file + self.locked = False + +cherrypy.lib.sessions.RedisSession = RedisSession diff --git a/sideboard/lib/_threads.py b/sideboard/lib/_threads.py index afcfae9..0b95fbd 100644 --- a/sideboard/lib/_threads.py +++ b/sideboard/lib/_threads.py @@ -2,11 +2,9 @@ import sys import ctypes, ctypes.util import psutil -import platform import traceback import threading -from sideboard.lib import log, config, on_startup, on_shutdown, class_property from sideboard.debugging import register_diagnostics_status_function # Replaces the prior prctl implementation with a direct call to pthread to change thread names @@ -89,77 +87,3 @@ def general_system_info(): out += ['Mem: ' + repr(psutil.virtual_memory()) if psutil else ''] out += ['Swap: ' + repr(psutil.swap_memory()) if psutil else ''] return '\n'.join(out) - -class threadlocal(object): - """ - This class exposes a dict-like interface on top of the threading.local - utility class; the "get", "set", "setdefault", and "clear" methods work the - same as for a dict except that each thread gets its own keys and values. - - Sideboard clears out all existing values and then initializes some specific - values in the following situations: - - 1) CherryPy page handlers have the 'username' key set to whatever value is - returned by cherrypy.session['username']. - - 2) Service methods called via JSON-RPC have the following two fields set: - -> username: as above - -> websocket_client: if the JSON-RPC request has a "websocket_client" - field, it's value is set here; this is used internally as the - "originating_client" value in notify() and plugins can ignore this - - 3) Service methods called via websocket have the following three fields set: - -> username: as above - -> websocket: the WebSocketDispatcher instance receiving the RPC call - -> client_data: see the client_data property below for an explanation - -> message: the RPC request body; this is present on the initial call - but not on subscription triggers in the broadcast thread - """ - _threadlocal = threading.local() - - @classmethod - def get(cls, key, default=None): - return getattr(cls._threadlocal, key, default) - - @classmethod - def set(cls, key, val): - return setattr(cls._threadlocal, key, val) - - @classmethod - def setdefault(cls, key, val): - val = cls.get(key, val) - cls.set(key, val) - return val - - @classmethod - def clear(cls): - cls._threadlocal.__dict__.clear() - - @classmethod - def get_client(cls): - """ - If called as part of an initial websocket RPC request, this returns the - client id if one exists, and otherwise returns None. Plugins probably - shouldn't need to call this method themselves. 
- """ - return cls.get('client') or cls.get('message', {}).get('client') - - @classmethod - def reset(cls, **kwargs): - """ - Plugins should never call this method directly without a good reason; it - clears out all existing values and replaces them with the key-value - pairs passed as keyword arguments to this function. - """ - cls.clear() - for key, val in kwargs.items(): - cls.set(key, val) - - @class_property - def client_data(cls): - """ - This propery is basically the websocket equivalent of cherrypy.session; - it's a dictionary where your service methods can place data which you'd - like to use in subsequent method calls. - """ - return cls.setdefault('client_data', {}) \ No newline at end of file diff --git a/sideboard/lib/_utils.py b/sideboard/lib/_utils.py index 18a09ca..1225d0e 100644 --- a/sideboard/lib/_utils.py +++ b/sideboard/lib/_utils.py @@ -1,29 +1,6 @@ from __future__ import unicode_literals -import os import json -from functools import wraps from datetime import datetime, date -from contextlib import contextmanager -from threading import RLock, Condition, current_thread -from collections.abc import Sized, Iterable, Mapping -from collections import defaultdict - - -def is_listy(x): - """ - returns a boolean indicating whether the passed object is "listy", - which we define as a sized iterable which is not a map or string - """ - return isinstance(x, Sized) and isinstance(x, Iterable) and not isinstance(x, (Mapping, type(b''), type(''))) - - -def listify(x): - """ - returns a list version of x if x is a non-string iterable, otherwise - returns a list with x as its only element - """ - return list(x) if is_listy(x) else [x] - class serializer(json.JSONEncoder): """ @@ -72,56 +49,6 @@ def register(cls, type, preprocessor): serializer.register(set, lambda s: sorted(list(s))) -def cached_property(func): - """decorator for making readonly, memoized properties""" - pname = '_cached_{}'.format(func.__name__) - - @property - @wraps(func) - def caching(self, *args, **kwargs): - if not hasattr(self, pname): - setattr(self, pname, func(self, *args, **kwargs)) - return getattr(self, pname) - return caching - - -def request_cached_property(func): - """ - Sometimes we want a property to be cached for the duration of a request, - with concurrent requests each having their own cached version. This does - that via the threadlocal class, such that each HTTP request CherryPy serves - and each RPC request served via JSON-RPC will have its own - cached value, which is cleared and then re-generated on later requests. - """ - from sideboard.lib import threadlocal - name = func.__module__ + '.' + func.__name__ - - @property - @wraps(func) - def with_caching(self): - val = threadlocal.get(name) - if val is None: - val = func(self) - threadlocal.set(name, val) - return val - return with_caching - - -class _class_property(property): - def __get__(self, cls, owner): - return self.fget.__get__(None, owner)() - - -def class_property(cls): - """ - For whatever reason, the @property decorator isn't smart enough to recognize - classmethods and behave differently on them than on instance methods. This - property may be used to create a class-level property, useful for singletons - and other one-per-class properties. Class properties are read-only. - """ - return _class_property(classmethod(cls)) - - def entry_point(func): """ Decorator used to define entry points for command-line scripts. 
Sideboard @@ -151,113 +78,3 @@ def some_action(): return func _entry_points = {} - - -class RWGuard(object): - """ - This utility class provides the ability to perform read/write locking, such - that we can have any number of readers OR a single writer. We give priority - to writers, who will get the lock before any readers. - - These locks are reentrant, meaning that the same thread can acquire a read - or write lock multiple times, and will then need to release the lock the - same number of times it was acquired. A thread with an acquired read lock - cannot acquire a write lock, or vice versa. Locks can only be released by - the threads which acquired them. - - This class is named RWGuard rather than RWLock because it is not itself a - lock, e.g. it doesn't have an acquire method, it cannot be directly used as - a context manager, etc. - """ - def __init__(self): - self.lock = RLock() - self.waiting_writer_count = 0 - self.acquired_writer = defaultdict(int) - self.acquired_readers = defaultdict(int) - self.ready_for_reads = Condition(self.lock) - self.ready_for_writes = Condition(self.lock) - - @property - @contextmanager - def read_locked(self): - """ - Context manager which acquires a read lock on entrance and releases it - on exit. Any number of threads may acquire a read lock. - """ - self.acquire_for_read() - try: - yield - finally: - self.release() - - @property - @contextmanager - def write_locked(self): - """ - Context manager which acquires a write lock on entrance and releases it - on exit. Only one thread may acquire a write lock at a time. - """ - self.acquire_for_write() - try: - yield - finally: - self.release() - - def acquire_for_read(self): - """ - NOTE: consumers are encouraged to use the "read_locked" context manager - instead of this method where possible. - - This method acquires the read lock for the current thread, blocking if - necessary until there are no other threads with the write lock acquired - or waiting for the write lock to be available. - """ - tid = current_thread().ident - assert tid not in self.acquired_writer, 'Threads which have already acquired a write lock may not lock for reading' - with self.lock: - while self.acquired_writer or (self.waiting_writer_count and tid not in self.acquired_readers): - self.ready_for_reads.wait() - self.acquired_readers[tid] += 1 - - def acquire_for_write(self): - """ - NOTE: consumers are encouraged to use the "write_locked" context manager - instead of this method where possible. - - This method acquires the write lock for the current thread, blocking if - necessary until no other threads have the write lock acquired and no - thread has the read lock acquired. - """ - tid = current_thread().ident - assert tid not in self.acquired_readers, 'Threads which have already acquired a read lock may not lock for writing' - with self.lock: - while self.acquired_readers or (self.acquired_writer and tid not in self.acquired_writer): - self.waiting_writer_count += 1 - self.ready_for_writes.wait() - self.waiting_writer_count -= 1 - self.acquired_writer[tid] += 1 - - def release(self): - """ - Release the read or write lock held by the current thread. Since these - locks are reentrant, this method must be called once for each time the - lock was acquired. This method raises an exception if called by a - thread with no read or write lock acquired. 
- """ - tid = current_thread().ident - assert tid in self.acquired_readers or tid in self.acquired_writer, 'this thread does not hold a read or write lock' - with self.lock: - for counts in [self.acquired_readers, self.acquired_writer]: - counts[tid] -= 1 - if counts[tid] <= 0: - del counts[tid] - - wake_readers = not self.waiting_writer_count - wake_writers = self.waiting_writer_count and not self.acquired_readers - - if wake_writers: - with self.ready_for_writes: - self.ready_for_writes.notify() - elif wake_readers: - with self.ready_for_reads: - self.ready_for_reads.notify_all() diff --git a/sideboard/run_debug_server.py b/sideboard/run_debug_server.py deleted file mode 100644 index 9f80019..0000000 --- a/sideboard/run_debug_server.py +++ /dev/null @@ -1,10 +0,0 @@ -from __future__ import unicode_literals -from sideboard.debugging import debugger_helpers_all_init - -import cherrypy - -if __name__ == '__main__': - debugger_helpers_all_init() - - cherrypy.engine.start() - cherrypy.engine.block() diff --git a/sideboard/run_mainloop.py b/sideboard/run_mainloop.py deleted file mode 100644 index 2b3b59d..0000000 --- a/sideboard/run_mainloop.py +++ /dev/null @@ -1,30 +0,0 @@ -from __future__ import unicode_literals -import os -import argparse - -from sideboard.lib import mainloop, entry_point, log - -parser = argparse.ArgumentParser(description='Run Sideboard as a daemon without starting CherryPy') -parser.add_argument('--pidfile', required=True, help='absolute path of file where process pid will be stored') - - -@entry_point -def mainloop_daemon(): - log.info('starting Sideboard daemon process') - args = parser.parse_args() - if os.fork() == 0: - pid = os.fork() - if pid == 0: - mainloop() - else: - log.debug('writing pid (%s) to pidfile (%s)', pid, args.pidfile) - try: - with open(args.pidfile, 'w') as f: - f.write('{}'.format(pid)) - except: - log.error('unexpected error writing pid (%s) to pidfile (%s)', pid, args.pidfile, exc_info=True) - - -@entry_point -def mainloop_foreground(): - mainloop() diff --git a/sideboard/server.py b/sideboard/server.py index 53981b6..c89fc45 100755 --- a/sideboard/server.py +++ b/sideboard/server.py @@ -1,16 +1,7 @@ from __future__ import unicode_literals -import os -import sys - import cherrypy -from sideboard.lib import config, threadlocal - - -def reset_threadlocal(): - threadlocal.reset(username=cherrypy.session.get("username")) - -cherrypy.tools.reset_threadlocal = cherrypy.Tool('before_handler', reset_threadlocal, priority=51) +from sideboard.lib import config cherrypy_config = {} for setting, value in config['cherrypy'].items(): diff --git a/sideboard/templates/connections.html b/sideboard/templates/connections.html deleted file mode 100644 index f62ac69..0000000 --- a/sideboard/templates/connections.html +++ /dev/null @@ -1,17 +0,0 @@ - - - - Sideboard Connection Tests - - -

-    Sideboard Connection Tests
-    {% for service, results in connections.items()|sort %}
-    {{ service }}
-    {% endfor %}
diff --git a/sideboard/templates/list_plugins.html b/sideboard/templates/list_plugins.html
deleted file mode 100644
index efc552b..0000000
--- a/sideboard/templates/list_plugins.html
+++ /dev/null
@@ -1,25 +0,0 @@
-    Sideboard Plugins

-    Welcome to Sideboard (version {{ version|default('not specified', true) }})
-    Sideboard documentation
-    There are {{ plugins|length }} plugins installed
-    {% for path, plugin in plugins.items()|sort %}
-    {{ plugin.name }} (version {{ plugin.version|default('not specified', true) }})
-    {% for path in plugin.paths %}
-    {{ path }}
-    {% endfor %}
-    {% endfor %}
diff --git a/sideboard/templates/login.html b/sideboard/templates/login.html
deleted file mode 100644
index dcad459..0000000
--- a/sideboard/templates/login.html
+++ /dev/null
@@ -1,31 +0,0 @@
-    Login
-    {{ message }}
-    Username:
-    Password:
- - - - - - - - - - - - - - - - - - diff --git a/sideboard/tests/test_lib.py b/sideboard/tests/test_lib.py index 1d67429..25607a3 100644 --- a/sideboard/tests/test_lib.py +++ b/sideboard/tests/test_lib.py @@ -1,17 +1,11 @@ from __future__ import unicode_literals import json -from time import sleep -from itertools import count from unittest import TestCase from datetime import datetime, date -from collections.abc import Sequence, Set -from threading import current_thread, Thread import pytest -import cherrypy -from mock import Mock -from sideboard.lib import serializer, ajax, is_listy, log, cached_property, request_cached_property, threadlocal, register_authenticator, restricted, all_restricted, RWGuard +from sideboard.lib import serializer class TestSerializer(TestCase): @@ -67,245 +61,3 @@ class Bax(Foo): pass json.dumps(Baz(), cls=serializer) # undefined which function will be used json.dumps(Bax(), cls=serializer) # undefined which function will be used """ - - -class TestIsListy(TestCase): - """ - We test all sequence types, set types, and mapping types listed at - http://docs.python.org/2/library/stdtypes.html plus a few example - user-defined collections subclasses. - """ - - def test_sized_builtin(self): - sized = [(), (1,), [], [1], set(), set([1]), frozenset(), frozenset([1]), - bytearray(), bytearray(1)] - for x in sized: - assert is_listy(x) - - def test_excluded(self): - assert not is_listy({}) - assert not is_listy('') - assert not is_listy(b'') - - def test_unsized_builtin(self): - assert not is_listy(iter([])) - assert not is_listy(i for i in range(2)) - - def test_user_defined_types(self): - class AlwaysEmptySequence(Sequence): - def __len__(self): return 0 - - def __getitem__(self, i): return [][i] - - assert is_listy(AlwaysEmptySequence()) - - class AlwaysEmptySet(Set): - def __len__(self): return 0 - - def __iter__(self): return iter([]) - - def __contains__(self, x): return False - - assert is_listy(AlwaysEmptySet()) - - def test_miscellaneous(self): - class Foo(object): - pass - - for x in [0, 1, False, True, Foo, object, object()]: - assert not is_listy(x) - - -def test_ajaz_serialization(): - class Root(object): - @ajax - def returns_date(self): - return date(2001, 2, 3) - assert '"2001-02-03"' == Root().returns_date() - - -def test_trace_logging(): - log.trace('normally this would be an error') - - -def test_cached_property(): - class Foo(object): - @cached_property - def bar(self): - return 5 - - foo = Foo() - assert not hasattr(foo, '_cached_bar') - assert 5 == foo.bar - assert 5 == foo._cached_bar - foo._cached_bar = 6 - assert 6 == foo.bar - assert 5 == Foo().bar # per-instance caching - - -def test_request_cached_property(): - class Foo(object): - @request_cached_property - def bar(self): - return 5 - - name = __name__ + '.bar' - foo = Foo() - assert threadlocal.get(name) is None - assert 5 == foo.bar - assert 5 == threadlocal.get(name) - threadlocal.set(name, 6) - assert 6 == foo.bar - assert 6 == Foo().bar # cache is shared between instances - - -class TestPluggableAuth(object): - @pytest.fixture(scope='session', autouse=True) - def mock_authenticator(self): - register_authenticator('test', '/mock_login_page', lambda: 'uid' in cherrypy.session) - - @pytest.fixture(autouse=True) - def mock_session(self, monkeypatch): - monkeypatch.setattr(cherrypy, 'session', {}, raising=False) - - def mock_login(self): - cherrypy.session['uid'] = 123 - - def test_double_registration(self): - pytest.raises(Exception, register_authenticator, 'test', 'already registered', 
lambda: 'this will not register due to an exception') - - def test_unknown_authenticator(self): - pytest.raises(Exception, all_restricted, 'unknown_authenticator') - - def test_all_restricted(self): - self.called = False - - @all_restricted('test') - class AllRestricted(object): - def index(inner_self): - self.called = True - - with pytest.raises(cherrypy.HTTPRedirect) as exc: - AllRestricted().index() - assert not self.called and exc.value.args[0][0].endswith('/mock_login_page') - - self.mock_login() - AllRestricted().index() - assert self.called - - def test_restricted(self): - self.called = False - - class SingleRestricted(object): - @restricted('test') - def index(inner_self): - self.called = True - - with pytest.raises(cherrypy.HTTPRedirect) as exc: - SingleRestricted().index() - assert not self.called and exc.value.args[0][0].endswith('/mock_login_page') - - self.mock_login() - SingleRestricted().index() - assert self.called - - -class TestRWGuard(object): - @pytest.fixture - def guard(self, monkeypatch): - guard = RWGuard() - monkeypatch.setattr(guard.ready_for_writes, 'notify', Mock()) - monkeypatch.setattr(guard.ready_for_reads, 'notify_all', Mock()) - return guard - - def test_read_locked_tracking(self, guard): - assert {} == guard.acquired_readers - with guard.read_locked: - assert {current_thread().ident: 1} == guard.acquired_readers - with guard.read_locked: - assert {current_thread().ident: 2} == guard.acquired_readers - assert {current_thread().ident: 1} == guard.acquired_readers - assert {} == guard.acquired_readers - - def test_write_locked_tracking(self, guard): - assert {} == guard.acquired_writer - with guard.write_locked: - assert {current_thread().ident: 1} == guard.acquired_writer - with guard.write_locked: - assert {current_thread().ident: 2} == guard.acquired_writer - assert {current_thread().ident: 1} == guard.acquired_writer - assert {} == guard.acquired_writer - - def test_multi_read_locking_allowed(self, guard): - guard.acquired_readers['mock-thread-ident'] = 1 - with guard.read_locked: - pass - - def test_read_write_exclusion(self, guard): - with guard.read_locked: - with pytest.raises(AssertionError): - with guard.write_locked: - pass - - def test_write_read_exclusion(self, guard): - with guard.write_locked: - with pytest.raises(Exception): - with guard.read_locked: - pass - - def test_release_requires_acquisition(self, guard): - pytest.raises(AssertionError, guard.release) - - def test_wake_readers(self, guard): - with guard.read_locked: - guard.waiting_writer_count = 1 - assert not guard.ready_for_reads.notify_all.called - - guard.waiting_writer_count = 0 - with guard.read_locked: - pass - assert guard.ready_for_reads.notify_all.called - - def test_wake_writers(self, guard): - with guard.write_locked: - guard.acquired_readers['mock-tid'] = 1 - guard.waiting_writer_count = 1 - assert not guard.ready_for_writes.notify.called - - guard.acquired_readers.clear() - with guard.write_locked: - guard.waiting_writer_count = 0 - assert not guard.ready_for_writes.notify.called - - with guard.write_locked: - guard.waiting_writer_count = 1 - assert guard.ready_for_writes.notify.called - - def test_threading(self): - guard = RWGuard() - read, written = [False], [False] - - def reader(): - with guard.read_locked: - read[0] = True - - def writer(): - with guard.write_locked: - written[0] = True - - with guard.write_locked: - Thread(target=reader).start() - Thread(target=writer).start() - sleep(0.1) - assert not read[0] and not written[0] - sleep(0.1) - assert read[0] and 
written[0] - - read, written = [False], [False] - with guard.read_locked: - Thread(target=reader).start() - Thread(target=writer).start() - sleep(0.1) - assert read[0] and not written[0] - sleep(0.1) - assert read[0] and written[0] diff --git a/sideboard/tests/test_logging.py b/sideboard/tests/test_logging.py deleted file mode 100644 index da5883a..0000000 --- a/sideboard/tests/test_logging.py +++ /dev/null @@ -1,20 +0,0 @@ -from __future__ import unicode_literals -import logging -import unittest - -from io import StringIO - - -class LoggerSetupTest(unittest.TestCase): - def _stream(self): - return StringIO() - - def _logger(self, logger_name, stream): - logging.getLogger().addHandler(logging.StreamHandler(stream)) - return logging.getLogger(logger_name) - - def test_importing_sideboard_doesnt_break_dummy_logger(self): - stream = self._stream() - dummy_logger = self._logger('dummy', stream) - dummy_logger.warning('do not break dummy logger') - assert stream.getvalue() == 'do not break dummy logger\n'
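
Note: with sideboard.internal.autolog and sideboard.internal.logging deleted, plugins can no longer do "from sideboard.lib import log". A minimal stand-in, sketched here with a hypothetical plugin function that is not part of this diff, is the stdlib logger; like the removed AutoLogger, logging.getLogger(__name__) names the logger after the module it is created in:

    # Before this change (removed): from sideboard.lib import log
    # Hedged stdlib replacement; the function and messages below are illustrative only.
    import logging

    log = logging.getLogger(__name__)  # per-module logger, similar to what AutoLogger resolved

    def load_widgets(count):
        # stdlib logging uses lazy %-style interpolation rather than the removed
        # adapter's str.format support
        log.debug('loading %s widgets', count)
        return list(range(count))

Plugins that used the removed cached_property decorator can likely switch to functools.cached_property (Python 3.8+), and helpers such as is_listy and listify are small enough to copy into a plugin directly.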

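
The on_startup/on_shutdown registry and the mainloop entry points are also gone; internally they were thin wrappers that subscribed their runners to the CherryPy engine bus ('start' and 'stop' channels), as the deleted _cp.py shows. A rough sketch of wiring a plugin hook to the bus directly, using hypothetical functions rather than anything from this repository:

    import cherrypy

    def open_resources():
        # hypothetical startup work formerly registered with on_startup()
        cherrypy.log('plugin resources opened')

    def close_resources():
        # hypothetical cleanup formerly registered with on_shutdown()
        cherrypy.log('plugin resources closed')

    # The engine bus calls subscribers on the named channel; an optional priority
    # argument orders them, lower numbers first, much like the removed registry.
    cherrypy.engine.subscribe('start', open_resources)
    cherrypy.engine.subscribe('stop', close_resources)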