diff --git a/setup.py b/setup.py
index 217a0708ffef3..ec12deafb99a4 100644
--- a/setup.py
+++ b/setup.py
@@ -16,7 +16,7 @@ def get_git_sha():
s = str(subprocess.check_output(['git', 'rev-parse', 'HEAD']))
return s.strip()
except Exception:
- return ""
+ return ''
GIT_SHA = get_git_sha()
@@ -24,10 +24,10 @@ def get_git_sha():
'GIT_SHA': GIT_SHA,
'version': version_string,
}
-print("-==-" * 15)
-print("VERSION: " + version_string)
-print("GIT SHA: " + GIT_SHA)
-print("-==-" * 15)
+print('-==-' * 15)
+print('VERSION: ' + version_string)
+print('GIT SHA: ' + GIT_SHA)
+print('-==-' * 15)
with open(os.path.join(PACKAGE_DIR, 'version_info.json'), 'w') as version_file:
json.dump(version_info, version_file)
@@ -36,8 +36,8 @@ def get_git_sha():
setup(
name='superset',
description=(
- "A interactive data visualization platform build on SqlAlchemy "
- "and druid.io"),
+ 'An interactive data visualization platform built on SQLAlchemy '
+ 'and druid.io'),
version=version_string,
packages=find_packages(),
include_package_data=True,
diff --git a/superset/__init__.py b/superset/__init__.py
index 6c1c9065b4cea..099edc133e152 100644
--- a/superset/__init__.py
+++ b/superset/__init__.py
@@ -42,7 +42,7 @@ def parse_manifest_json():
with open(MANIFEST_FILE, 'r') as f:
manifest = json.load(f)
except Exception:
- print("no manifest file found at " + MANIFEST_FILE)
+ print('no manifest file found at ' + MANIFEST_FILE)
def get_manifest_file(filename):
@@ -66,7 +66,7 @@ def get_js_manifest():
print("Registering blueprint: '{}'".format(bp.name))
app.register_blueprint(bp)
except Exception as e:
- print("blueprint registration failed")
+ print('blueprint registration failed')
logging.exception(e)
if conf.get('SILENCE_FAB'):
@@ -91,7 +91,7 @@ def get_js_manifest():
cache = utils.setup_cache(app, conf.get('CACHE_CONFIG'))
tables_cache = utils.setup_cache(app, conf.get('TABLE_NAMES_CACHE_CONFIG'))
-migrate = Migrate(app, db, directory=APP_DIR + "/migrations")
+migrate = Migrate(app, db, directory=APP_DIR + '/migrations')
# Logging configuration
logging.basicConfig(format=app.config.get('LOG_FORMAT'))
@@ -149,15 +149,15 @@ def index(self):
db.session,
base_template='superset/base.html',
indexview=MyIndexView,
- security_manager_class=app.config.get("CUSTOM_SECURITY_MANAGER"))
+ security_manager_class=app.config.get('CUSTOM_SECURITY_MANAGER'))
sm = appbuilder.sm
-results_backend = app.config.get("RESULTS_BACKEND")
+results_backend = app.config.get('RESULTS_BACKEND')
# Registering sources
-module_datasource_map = app.config.get("DEFAULT_MODULE_DS_MAP")
-module_datasource_map.update(app.config.get("ADDITIONAL_MODULE_DS_MAP"))
+module_datasource_map = app.config.get('DEFAULT_MODULE_DS_MAP')
+module_datasource_map.update(app.config.get('ADDITIONAL_MODULE_DS_MAP'))
ConnectorRegistry.register_sources(module_datasource_map)
from superset import views # noqa
diff --git a/superset/cli.py b/superset/cli.py
index 37a0f63698938..540bea8ceed25 100755
--- a/superset/cli.py
+++ b/superset/cli.py
@@ -29,38 +29,38 @@ def init():
@manager.option(
'-d', '--debug', action='store_true',
- help="Start the web server in debug mode")
+ help='Start the web server in debug mode')
@manager.option(
'-n', '--no-reload', action='store_false', dest='no_reload',
- default=config.get("FLASK_USE_RELOAD"),
+ default=config.get('FLASK_USE_RELOAD'),
help="Don't use the reloader in debug mode")
@manager.option(
- '-a', '--address', default=config.get("SUPERSET_WEBSERVER_ADDRESS"),
- help="Specify the address to which to bind the web server")
+ '-a', '--address', default=config.get('SUPERSET_WEBSERVER_ADDRESS'),
+ help='Specify the address to which to bind the web server')
@manager.option(
- '-p', '--port', default=config.get("SUPERSET_WEBSERVER_PORT"),
- help="Specify the port on which to run the web server")
+ '-p', '--port', default=config.get('SUPERSET_WEBSERVER_PORT'),
+ help='Specify the port on which to run the web server')
@manager.option(
'-w', '--workers',
- default=config.get("SUPERSET_WORKERS", 2),
- help="Number of gunicorn web server workers to fire up")
+ default=config.get('SUPERSET_WORKERS', 2),
+ help='Number of gunicorn web server workers to fire up')
@manager.option(
- '-t', '--timeout', default=config.get("SUPERSET_WEBSERVER_TIMEOUT"),
- help="Specify the timeout (seconds) for the gunicorn web server")
+ '-t', '--timeout', default=config.get('SUPERSET_WEBSERVER_TIMEOUT'),
+ help='Specify the timeout (seconds) for the gunicorn web server')
@manager.option(
- '-s', '--socket', default=config.get("SUPERSET_WEBSERVER_SOCKET"),
- help="Path to a UNIX socket as an alternative to address:port, e.g. "
- "/var/run/superset.sock. "
- "Will override the address and port values.")
+ '-s', '--socket', default=config.get('SUPERSET_WEBSERVER_SOCKET'),
+ help='Path to a UNIX socket as an alternative to address:port, e.g. '
+ '/var/run/superset.sock. '
+ 'Will override the address and port values.')
def runserver(debug, no_reload, address, port, timeout, workers, socket):
"""Starts a Superset web server."""
- debug = debug or config.get("DEBUG")
+ debug = debug or config.get('DEBUG')
if debug:
print(Fore.BLUE + '-=' * 20)
print(
- Fore.YELLOW + "Starting Superset server in " +
- Fore.RED + "DEBUG" +
- Fore.YELLOW + " mode")
+ Fore.YELLOW + 'Starting Superset server in ' +
+ Fore.RED + 'DEBUG' +
+ Fore.YELLOW + ' mode')
print(Fore.BLUE + '-=' * 20)
print(Style.RESET_ALL)
app.run(
@@ -70,16 +70,16 @@ def runserver(debug, no_reload, address, port, timeout, workers, socket):
debug=True,
use_reloader=no_reload)
else:
- addr_str = " unix:{socket} " if socket else" {address}:{port} "
+ addr_str = ' unix:{socket} ' if socket else ' {address}:{port} '
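+ # build the gunicorn command, binding to the UNIX socket when one is given, otherwise to address:port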
cmd = (
- "gunicorn "
- "-w {workers} "
- "--timeout {timeout} "
- "-b " + addr_str +
- "--limit-request-line 0 "
- "--limit-request-field_size 0 "
- "superset:app").format(**locals())
- print(Fore.GREEN + "Starting server with command: ")
+ 'gunicorn '
+ '-w {workers} '
+ '--timeout {timeout} '
+ '-b ' + addr_str +
+ '--limit-request-line 0 '
+ '--limit-request-field_size 0 '
+ 'superset:app').format(**locals())
+ print(Fore.GREEN + 'Starting server with command: ')
print(Fore.YELLOW + cmd)
print(Style.RESET_ALL)
Popen(cmd, shell=True).wait()
@@ -87,69 +87,69 @@ def runserver(debug, no_reload, address, port, timeout, workers, socket):
@manager.option(
'-v', '--verbose', action='store_true',
- help="Show extra information")
+ help='Show extra information')
def version(verbose):
"""Prints the current version number"""
print(Fore.BLUE + '-=' * 15)
- print(Fore.YELLOW + "Superset " + Fore.CYAN + "{version}".format(
+ print(Fore.YELLOW + 'Superset ' + Fore.CYAN + '{version}'.format(
version=config.get('VERSION_STRING')))
print(Fore.BLUE + '-=' * 15)
if verbose:
- print("[DB] : " + "{}".format(db.engine))
+ print('[DB] : ' + '{}'.format(db.engine))
print(Style.RESET_ALL)
@manager.option(
'-t', '--load-test-data', action='store_true',
- help="Load additional test data")
+ help='Load additional test data')
def load_examples(load_test_data):
"""Loads a set of Slices and Dashboards and a supporting dataset """
from superset import data
- print("Loading examples into {}".format(db))
+ print('Loading examples into {}'.format(db))
data.load_css_templates()
- print("Loading energy related dataset")
+ print('Loading energy related dataset')
data.load_energy()
print("Loading [World Bank's Health Nutrition and Population Stats]")
data.load_world_bank_health_n_pop()
- print("Loading [Birth names]")
+ print('Loading [Birth names]')
data.load_birth_names()
- print("Loading [Random time series data]")
+ print('Loading [Random time series data]')
data.load_random_time_series_data()
- print("Loading [Random long/lat data]")
+ print('Loading [Random long/lat data]')
data.load_long_lat_data()
- print("Loading [Country Map data]")
+ print('Loading [Country Map data]')
data.load_country_map_data()
- print("Loading [Multiformat time series]")
+ print('Loading [Multiformat time series]')
data.load_multiformat_time_series_data()
- print("Loading [Misc Charts] dashboard")
+ print('Loading [Misc Charts] dashboard')
data.load_misc_dashboard()
if load_test_data:
- print("Loading [Unicode test data]")
+ print('Loading [Unicode test data]')
data.load_unicode_test_data()
@manager.option(
'-d', '--datasource',
help=(
- "Specify which datasource name to load, if omitted, all "
- "datasources will be refreshed"
+ 'Specify which datasource name to load, if omitted, all '
+ 'datasources will be refreshed'
),
)
@manager.option(
'-m', '--merge',
help=(
"Specify using 'merge' property during operation. "
- "Default value is False "
+ 'Default value is False '
),
)
def refresh_druid(datasource, merge):
@@ -167,8 +167,8 @@ def refresh_druid(datasource, merge):
logging.exception(e)
cluster.metadata_last_refreshed = datetime.now()
print(
- "Refreshed metadata from cluster "
- "[" + cluster.cluster_name + "]")
+ 'Refreshed metadata from cluster '
+ '[' + cluster.cluster_name + ']')
session.commit()
@@ -188,14 +188,14 @@ def update_datasources_cache():
@manager.option(
'-w', '--workers',
type=int,
- help="Number of celery server workers to fire up")
+ help='Number of celery server workers to fire up')
def worker(workers):
"""Starts a Superset worker for async SQL query execution."""
if workers:
celery_app.conf.update(CELERYD_CONCURRENCY=workers)
- elif config.get("SUPERSET_CELERY_WORKERS"):
+ elif config.get('SUPERSET_CELERY_WORKERS'):
celery_app.conf.update(
- CELERYD_CONCURRENCY=config.get("SUPERSET_CELERY_WORKERS"))
+ CELERYD_CONCURRENCY=config.get('SUPERSET_CELERY_WORKERS'))
worker = celery_app.Worker(optimization='fair')
worker.start()
@@ -216,12 +216,12 @@ def flower(port, address):
broker"""
BROKER_URL = celery_app.conf.BROKER_URL
cmd = (
- "celery flower "
- "--broker={BROKER_URL} "
- "--port={port} "
- "--address={address} "
+ 'celery flower '
+ '--broker={BROKER_URL} '
+ '--port={port} '
+ '--address={address} '
).format(**locals())
- print(Fore.GREEN + "Starting a Celery Flower instance")
+ print(Fore.GREEN + 'Starting a Celery Flower instance')
print(Fore.BLUE + '-=' * 40)
print(Fore.YELLOW + cmd)
print(Fore.BLUE + '-=' * 40)
diff --git a/superset/config.py b/superset/config.py
index 95ee3b0dc1173..2d168a9b8d642 100644
--- a/superset/config.py
+++ b/superset/config.py
@@ -92,10 +92,10 @@
# GLOBALS FOR APP Builder
# ------------------------------
# Uncomment to setup Your App name
-APP_NAME = "Superset"
+APP_NAME = 'Superset'
# Uncomment to setup an App icon
-APP_ICON = "/static/assets/images/superset-logo@2x.png"
+APP_ICON = '/static/assets/images/superset-logo@2x.png'
# Druid query timezone
# tz.tzutc() : Using utc timezone
@@ -239,7 +239,7 @@
BACKUP_COUNT = 30
# Set this API key to enable Mapbox visualizations
-MAPBOX_API_KEY = ""
+MAPBOX_API_KEY = ''
# Maximum number of rows returned in the SQL editor
SQL_MAX_ROW = 1000000
@@ -329,7 +329,7 @@ class CeleryConfig(object):
# The link to a page containing common errors and their resolutions
# It will be appended at the bottom of sql_lab errors.
-TROUBLESHOOTING_LINK = ""
+TROUBLESHOOTING_LINK = ''
# Integrate external Blueprints to the app by passing them to your
diff --git a/superset/connectors/base/models.py b/superset/connectors/base/models.py
index 0a03366fdbba0..8042ac9c1e037 100644
--- a/superset/connectors/base/models.py
+++ b/superset/connectors/base/models.py
@@ -60,7 +60,7 @@ def slices(self):
@property
def uid(self):
"""Unique id across datasource types"""
- return "{self.id}__{self.type}".format(**locals())
+ return '{self.id}__{self.type}'.format(**locals())
@property
def column_names(self):
@@ -72,7 +72,7 @@ def columns_types(self):
@property
def main_dttm_col(self):
- return "timestamp"
+ return 'timestamp'
@property
def connection(self):
@@ -105,7 +105,7 @@ def explore_url(self):
if self.default_endpoint:
return self.default_endpoint
else:
- return "/superset/explore/{obj.type}/{obj.id}/".format(obj=self)
+ return '/superset/explore/{obj.type}/{obj.id}/'.format(obj=self)
@property
def column_formats(self):
diff --git a/superset/connectors/base/views.py b/superset/connectors/base/views.py
index 97a40ebf682c7..2794d183d778c 100644
--- a/superset/connectors/base/views.py
+++ b/superset/connectors/base/views.py
@@ -8,6 +8,6 @@ class DatasourceModelView(SupersetModelView):
def pre_delete(self, obj):
if obj.slices:
raise SupersetException(Markup(
- "Cannot delete a datasource that has slices attached to it."
+ 'Cannot delete a datasource that has slices attached to it. '
"Here's the list of associated slices: " +
- "".join([o.slice_link for o in obj.slices])))
+ ''.join([o.slice_link for o in obj.slices])))
diff --git a/superset/connectors/druid/models.py b/superset/connectors/druid/models.py
index afbfb26da520d..4c8a016cb53a5 100644
--- a/superset/connectors/druid/models.py
+++ b/superset/connectors/druid/models.py
@@ -33,7 +33,7 @@
DimSelector, DTTM_ALIAS, flasher, MetricPermException,
)
-DRUID_TZ = conf.get("DRUID_TZ")
+DRUID_TZ = conf.get('DRUID_TZ')
# Function wrapper because bound methods cannot
@@ -65,7 +65,7 @@ class DruidCluster(Model, AuditMixinNullable):
"""ORM object referencing the Druid clusters"""
__tablename__ = 'clusters'
- type = "druid"
+ type = 'druid'
id = Column(Integer, primary_key=True)
verbose_name = Column(String(250), unique=True)
@@ -86,21 +86,21 @@ def __repr__(self):
def get_pydruid_client(self):
cli = PyDruid(
- "http://{0}:{1}/".format(self.broker_host, self.broker_port),
+ 'http://{0}:{1}/'.format(self.broker_host, self.broker_port),
self.broker_endpoint)
return cli
def get_datasources(self):
endpoint = (
- "http://{obj.coordinator_host}:{obj.coordinator_port}/"
- "{obj.coordinator_endpoint}/datasources"
+ 'http://{obj.coordinator_host}:{obj.coordinator_port}/'
+ '{obj.coordinator_endpoint}/datasources'
).format(obj=self)
return json.loads(requests.get(endpoint).text)
def get_druid_version(self):
endpoint = (
- "http://{obj.coordinator_host}:{obj.coordinator_port}/status"
+ 'http://{obj.coordinator_host}:{obj.coordinator_port}/status'
).format(obj=self)
return json.loads(requests.get(endpoint).text)['version']
@@ -144,11 +144,11 @@ def refresh_async(self, datasource_names, merge_flag, refreshAll):
with session.no_autoflush:
session.add(datasource)
flasher(
- "Adding new datasource [{}]".format(ds_name), 'success')
+ 'Adding new datasource [{}]'.format(ds_name), 'success')
ds_map[ds_name] = datasource
elif refreshAll:
flasher(
- "Refreshing datasource [{}]".format(ds_name), 'info')
+ 'Refreshing datasource [{}]'.format(ds_name), 'info')
else:
del ds_map[ds_name]
continue
@@ -200,7 +200,7 @@ def refresh_async(self, datasource_names, merge_flag, refreshAll):
@property
def perm(self):
- return "[{obj.cluster_name}].(id:{obj.id})".format(obj=self)
+ return '[{obj.cluster_name}].(id:{obj.id})'.format(obj=self)
def get_perm(self):
return self.perm
@@ -390,7 +390,7 @@ def json_obj(self):
@property
def perm(self):
return (
- "{parent_name}.[{obj.metric_name}](id:{obj.id})"
+ '{parent_name}.[{obj.metric_name}](id:{obj.id})'
).format(obj=self,
parent_name=self.datasource.full_name,
) if self.datasource else None
@@ -410,13 +410,13 @@ class DruidDatasource(Model, BaseDatasource):
__tablename__ = 'datasources'
- type = "druid"
- query_langtage = "json"
+ type = 'druid'
+ query_langtage = 'json'
cluster_class = DruidCluster
metric_class = DruidMetric
column_class = DruidColumn
- baselink = "druiddatasourcemodelview"
+ baselink = 'druiddatasourcemodelview'
# Columns
datasource_name = Column(String(255), unique=True)
@@ -469,8 +469,8 @@ def schema_perm(self):
def get_perm(self):
return (
- "[{obj.cluster_name}].[{obj.datasource_name}]"
- "(id:{obj.id})").format(obj=self)
+ '[{obj.cluster_name}].[{obj.datasource_name}]'
+ '(id:{obj.id})').format(obj=self)
@property
def link(self):
@@ -485,13 +485,13 @@ def full_name(self):
@property
def time_column_grains(self):
return {
- "time_columns": [
+ 'time_columns': [
'all', '5 seconds', '30 seconds', '1 minute',
'5 minutes', '1 hour', '6 hour', '1 day', '7 days',
'week', 'week_starting_sunday', 'week_ending_saturday',
'month',
],
- "time_grains": ['now'],
+ 'time_grains': ['now'],
}
def __repr__(self):
@@ -499,7 +499,7 @@ def __repr__(self):
@renders('datasource_name')
def datasource_link(self):
- url = "/superset/explore/{obj.type}/{obj.id}/".format(obj=self)
+ url = '/superset/explore/{obj.type}/{obj.id}/'.format(obj=self)
name = escape(self.datasource_name)
return Markup('{name}'.format(**locals()))
@@ -561,7 +561,7 @@ def int_or_0(v):
def latest_metadata(self):
"""Returns segment metadata from the latest segment"""
- logging.info("Syncing datasource [{}]".format(self.datasource_name))
+ logging.info('Syncing datasource [{}]'.format(self.datasource_name))
client = self.cluster.get_pydruid_client()
results = client.time_boundary(datasource=self.datasource_name)
if not results:
@@ -585,7 +585,7 @@ def latest_metadata(self):
merge=self.merge_flag,
analysisTypes=[])
except Exception as e:
- logging.warning("Failed first attempt to get latest segment")
+ logging.warning('Failed first attempt to get latest segment')
logging.exception(e)
if not segment_metadata:
# if no segments in the past 7 days, look at all segments
@@ -601,7 +601,7 @@ def latest_metadata(self):
merge=self.merge_flag,
analysisTypes=[])
except Exception as e:
- logging.warning("Failed 2nd attempt to get latest segment")
+ logging.warning('Failed 2nd attempt to get latest segment')
logging.exception(e)
if segment_metadata:
return segment_metadata[-1]['columns']
@@ -669,7 +669,7 @@ def sync_to_db_from_config(
groupby=True,
filterable=True,
# TODO: fetch type from Hive.
- type="STRING",
+ type='STRING',
datasource=datasource,
)
session.add(col_obj)
@@ -678,20 +678,20 @@ def sync_to_db_from_config(
session.query(DruidMetric)
.filter(DruidMetric.datasource_name == druid_config['name'])
.filter(or_(DruidMetric.metric_name == spec['name']
- for spec in druid_config["metrics_spec"]))
+ for spec in druid_config['metrics_spec']))
)
metric_objs = {metric.metric_name: metric for metric in metric_objs}
- for metric_spec in druid_config["metrics_spec"]:
- metric_name = metric_spec["name"]
- metric_type = metric_spec["type"]
+ for metric_spec in druid_config['metrics_spec']:
+ metric_name = metric_spec['name']
+ metric_type = metric_spec['type']
metric_json = json.dumps(metric_spec)
- if metric_type == "count":
- metric_type = "longSum"
+ if metric_type == 'count':
+ metric_type = 'longSum'
metric_json = json.dumps({
- "type": "longSum",
- "name": metric_name,
- "fieldName": metric_name,
+ 'type': 'longSum',
+ 'name': metric_name,
+ 'fieldName': metric_name,
})
metric_obj = metric_objs.get(metric_name, None)
@@ -699,11 +699,11 @@ def sync_to_db_from_config(
metric_obj = DruidMetric(
metric_name=metric_name,
metric_type=metric_type,
- verbose_name="%s(%s)" % (metric_type, metric_name),
+ verbose_name='%s(%s)' % (metric_type, metric_name),
datasource=datasource,
json=metric_json,
description=(
- "Imported from the airolap config dir for %s" %
+ 'Imported from the airolap config dir for %s' %
druid_config['name']),
)
session.add(metric_obj)
@@ -823,7 +823,7 @@ def recursive_get_fields(_conf):
)
elif mconf.get('type') == 'arithmetic':
post_aggs[metric_name] = Postaggregator(
- mconf.get('fn', "/"),
+ mconf.get('fn', '/'),
mconf.get('fields', []),
mconf.get('name', ''))
else:
@@ -844,11 +844,11 @@ def values_for_column(self,
qry = dict(
datasource=self.datasource_name,
- granularity="all",
+ granularity='all',
intervals=from_dttm.isoformat() + '/' + datetime.now().isoformat(),
- aggregations=dict(count=count("count")),
+ aggregations=dict(count=count('count')),
dimension=column_name,
- metric="count",
+ metric='count',
threshold=limit,
)
@@ -870,16 +870,16 @@ def _add_filter_from_pre_query_data(self, df, dimensions, dim_filter):
f = Dimension(dim) == row[dim]
fields.append(f)
if len(fields) > 1:
- term = Filter(type="and", fields=fields)
+ term = Filter(type='and', fields=fields)
new_filters.append(term)
elif fields:
new_filters.append(fields[0])
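+ # OR the per-row filters together, then AND the result with any pre-existing dimension filter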
if new_filters:
- ff = Filter(type="or", fields=new_filters)
+ ff = Filter(type='or', fields=new_filters)
if not dim_filter:
ret = ff
else:
- ret = Filter(type="and", fields=[ff, dim_filter])
+ ret = Filter(type='and', fields=[ff, dim_filter])
return ret
def run_query( # noqa / druid
@@ -913,7 +913,7 @@ def run_query( # noqa / druid
to_dttm = to_dttm.replace(tzinfo=DRUID_TZ)
timezone = from_dttm.tzname()
- query_str = ""
+ query_str = ''
metrics_dict = {m.metric_name: m for m in self.metrics}
columns_dict = {c.column_name: c for c in self.columns}
@@ -936,7 +936,7 @@ def run_query( # noqa / druid
if rejected_metrics:
raise MetricPermException(
- "Access to the metrics denied: " + ', '.join(rejected_metrics),
+ 'Access to the metrics denied: ' + ', '.join(rejected_metrics),
)
# the dimensions list with dimensionSpecs expanded
@@ -969,7 +969,7 @@ def run_query( # noqa / druid
having_filters = self.get_having_filters(extras.get('having_druid'))
if having_filters:
qry['having'] = having_filters
- order_direction = "descending" if order_desc else "ascending"
+ order_direction = 'descending' if order_desc else 'ascending'
if len(groupby) == 0 and not having_filters:
del qry['dimensions']
client.timeseries(**qry)
@@ -987,17 +987,17 @@ def run_query( # noqa / druid
order_by = list(qry['aggregations'].keys())[0]
# Limit on the number of timeseries, doing a two-phases query
pre_qry = deepcopy(qry)
- pre_qry['granularity'] = "all"
+ pre_qry['granularity'] = 'all'
pre_qry['threshold'] = min(row_limit,
timeseries_limit or row_limit)
pre_qry['metric'] = order_by
pre_qry['dimension'] = dim
del pre_qry['dimensions']
client.topn(**pre_qry)
- query_str += "// Two phase query\n// Phase 1\n"
+ query_str += '// Two phase query\n// Phase 1\n'
query_str += json.dumps(
client.query_builder.last_query.query_dict, indent=2)
- query_str += "\n"
+ query_str += '\n'
if phase == 1:
return query_str
query_str += (
@@ -1023,23 +1023,23 @@ def run_query( # noqa / druid
order_by = timeseries_limit_metric
# Limit on the number of timeseries, doing a two-phases query
pre_qry = deepcopy(qry)
- pre_qry['granularity'] = "all"
+ pre_qry['granularity'] = 'all'
pre_qry['limit_spec'] = {
- "type": "default",
- "limit": min(timeseries_limit, row_limit),
+ 'type': 'default',
+ 'limit': min(timeseries_limit, row_limit),
'intervals': (
inner_from_dttm.isoformat() + '/' +
inner_to_dttm.isoformat()),
- "columns": [{
- "dimension": order_by,
- "direction": order_direction,
+ 'columns': [{
+ 'dimension': order_by,
+ 'direction': order_direction,
}],
}
client.groupby(**pre_qry)
- query_str += "// Two phase query\n// Phase 1\n"
+ query_str += '// Two phase query\n// Phase 1\n'
query_str += json.dumps(
client.query_builder.last_query.query_dict, indent=2)
- query_str += "\n"
+ query_str += '\n'
if phase == 1:
return query_str
query_str += (
@@ -1053,12 +1053,12 @@ def run_query( # noqa / druid
qry['limit_spec'] = None
if row_limit:
qry['limit_spec'] = {
- "type": "default",
- "limit": row_limit,
- "columns": [{
- "dimension": (
+ 'type': 'default',
+ 'limit': row_limit,
+ 'columns': [{
+ 'dimension': (
metrics[0] if metrics else self.metrics[0]),
- "direction": order_direction,
+ 'direction': order_direction,
}],
}
client.groupby(**qry)
@@ -1074,7 +1074,7 @@ def query(self, query_obj):
df = client.export_pandas()
if df is None or df.size == 0:
- raise Exception(_("No data was returned."))
+ raise Exception(_('No data was returned.'))
df.columns = [
DTTM_ALIAS if c == 'timestamp' else c for c in df.columns]
@@ -1120,7 +1120,7 @@ def get_filters(raw_filters, num_cols): # noqa
cond = None
if op in ('in', 'not in'):
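+ # strip surrounding single quotes from string values in the IN / NOT IN list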
eq = [
types.replace("'", '').strip()
if isinstance(types, string_types)
else types
for types in eq]
@@ -1149,13 +1149,13 @@ def get_filters(raw_filters, num_cols): # noqa
else:
for s in eq:
fields.append(Dimension(col) == s)
- cond = Filter(type="or", fields=fields)
+ cond = Filter(type='or', fields=fields)
if op == 'not in':
cond = ~cond
elif op == 'regex':
- cond = Filter(type="regex", pattern=eq, dimension=col)
+ cond = Filter(type='regex', pattern=eq, dimension=col)
elif op == '>=':
cond = Bound(col, eq, None, alphaNumeric=is_numeric_col)
elif op == '<=':
@@ -1172,7 +1172,7 @@ def get_filters(raw_filters, num_cols): # noqa
)
if filters:
- filters = Filter(type="and", fields=[
+ filters = Filter(type='and', fields=[
cond,
filters,
])
diff --git a/superset/connectors/druid/views.py b/superset/connectors/druid/views.py
index 5293166c77fc5..713a43c36be3f 100644
--- a/superset/connectors/druid/views.py
+++ b/superset/connectors/druid/views.py
@@ -37,27 +37,27 @@ class DruidColumnInlineView(CompactCRUDMixin, SupersetModelView): # noqa
can_delete = False
page_size = 500
label_columns = {
- 'column_name': _("Column"),
- 'type': _("Type"),
- 'datasource': _("Datasource"),
- 'groupby': _("Groupable"),
- 'filterable': _("Filterable"),
- 'count_distinct': _("Count Distinct"),
- 'sum': _("Sum"),
- 'min': _("Min"),
- 'max': _("Max"),
+ 'column_name': _('Column'),
+ 'type': _('Type'),
+ 'datasource': _('Datasource'),
+ 'groupby': _('Groupable'),
+ 'filterable': _('Filterable'),
+ 'count_distinct': _('Count Distinct'),
+ 'sum': _('Sum'),
+ 'min': _('Min'),
+ 'max': _('Max'),
}
description_columns = {
'filterable': _(
- "Whether this column is exposed in the `Filters` section "
- "of the explore view."),
+ 'Whether this column is exposed in the `Filters` section '
+ 'of the explore view.'),
'dimension_spec_json': utils.markdown(
- "this field can be used to specify "
- "a `dimensionSpec` as documented [here]"
- "(http://druid.io/docs/latest/querying/dimensionspecs.html). "
- "Make sure to input valid JSON and that the "
- "`outputName` matches the `column_name` defined "
- "above.",
+ 'this field can be used to specify '
+ 'a `dimensionSpec` as documented [here]'
+ '(http://druid.io/docs/latest/querying/dimensionspecs.html). '
+ 'Make sure to input valid JSON and that the '
+ '`outputName` matches the `column_name` defined '
+ 'above.',
True),
}
@@ -91,23 +91,23 @@ class DruidMetricInlineView(CompactCRUDMixin, SupersetModelView): # noqa
}
description_columns = {
'metric_type': utils.markdown(
- "use `postagg` as the metric type if you are defining a "
- "[Druid Post Aggregation]"
- "(http://druid.io/docs/latest/querying/post-aggregations.html)",
+ 'use `postagg` as the metric type if you are defining a '
+ '[Druid Post Aggregation]'
+ '(http://druid.io/docs/latest/querying/post-aggregations.html)',
True),
- 'is_restricted': _("Whether the access to this metric is restricted "
- "to certain roles. Only roles with the permission "
+ 'is_restricted': _('Whether the access to this metric is restricted '
+ 'to certain roles. Only roles with the permission '
"'metric access on XXX (the name of this metric)' "
- "are allowed to access this metric"),
+ 'are allowed to access this metric'),
}
label_columns = {
- 'metric_name': _("Metric"),
- 'description': _("Description"),
- 'verbose_name': _("Verbose Name"),
- 'metric_type': _("Type"),
- 'json': _("JSON"),
- 'datasource': _("Druid Datasource"),
- 'warning_text': _("Warning Message"),
+ 'metric_name': _('Metric'),
+ 'description': _('Description'),
+ 'verbose_name': _('Verbose Name'),
+ 'metric_type': _('Type'),
+ 'json': _('JSON'),
+ 'datasource': _('Druid Datasource'),
+ 'warning_text': _('Warning Message'),
}
def post_add(self, metric):
@@ -139,13 +139,13 @@ class DruidClusterModelView(SupersetModelView, DeleteMixin): # noqa
list_columns = ['cluster_name', 'metadata_last_refreshed']
search_columns = ('cluster_name',)
label_columns = {
- 'cluster_name': _("Cluster"),
- 'coordinator_host': _("Coordinator Host"),
- 'coordinator_port': _("Coordinator Port"),
- 'coordinator_endpoint': _("Coordinator Endpoint"),
- 'broker_host': _("Broker Host"),
- 'broker_port': _("Broker Port"),
- 'broker_endpoint': _("Broker Endpoint"),
+ 'cluster_name': _('Cluster'),
+ 'coordinator_host': _('Coordinator Host'),
+ 'coordinator_port': _('Coordinator Port'),
+ 'coordinator_endpoint': _('Coordinator Endpoint'),
+ 'broker_host': _('Broker Host'),
+ 'broker_port': _('Broker Port'),
+ 'broker_endpoint': _('Broker Endpoint'),
}
def pre_add(self, cluster):
@@ -160,11 +160,11 @@ def _delete(self, pk):
appbuilder.add_view(
DruidClusterModelView,
- name="Druid Clusters",
- label=__("Druid Clusters"),
- icon="fa-cubes",
- category="Sources",
- category_label=__("Sources"),
+ name='Druid Clusters',
+ label=__('Druid Clusters'),
+ icon='fa-cubes',
+ category='Sources',
+ category_label=__('Sources'),
category_icon='fa-database',)
@@ -195,44 +195,44 @@ class DruidDatasourceModelView(DatasourceModelView, DeleteMixin): # noqa
base_order = ('datasource_name', 'asc')
description_columns = {
'slices': _(
- "The list of slices associated with this table. By "
- "altering this datasource, you may change how these associated "
- "slices behave. "
- "Also note that slices need to point to a datasource, so "
- "this form will fail at saving if removing slices from a "
- "datasource. If you want to change the datasource for a slice, "
+ 'The list of slices associated with this table. By '
+ 'altering this datasource, you may change how these associated '
+ 'slices behave. '
+ 'Also note that slices need to point to a datasource, so '
+ 'this form will fail at saving if removing slices from a '
+ 'datasource. If you want to change the datasource for a slice, '
"overwrite the slice from the 'explore view'"),
- 'offset': _("Timezone offset (in hours) for this datasource"),
+ 'offset': _('Timezone offset (in hours) for this datasource'),
'description': Markup(
- "Supports markdown"),
+ 'Supports markdown'),
'fetch_values_from': _(
- "Time expression to use as a predicate when retrieving "
- "distinct values to populate the filter component. "
- "Only applies when `Enable Filter Select` is on. If "
- "you enter `7 days ago`, the distinct list of values in "
- "the filter will be populated based on the distinct value over "
- "the past week"),
+ 'Time expression to use as a predicate when retrieving '
+ 'distinct values to populate the filter component. '
+ 'Only applies when `Enable Filter Select` is on. If '
+ 'you enter `7 days ago`, the distinct list of values in '
+ 'the filter will be populated based on the distinct value over '
+ 'the past week'),
'filter_select_enabled': _(
"Whether to populate the filter's dropdown in the explore "
"view's filter section with a list of distinct values fetched "
- "from the backend on the fly"),
+ 'from the backend on the fly'),
'default_endpoint': _(
- "Redirects to this endpoint when clicking on the datasource "
- "from the datasource list"),
+ 'Redirects to this endpoint when clicking on the datasource '
+ 'from the datasource list'),
}
base_filters = [['id', DatasourceFilter, lambda: []]]
label_columns = {
- 'slices': _("Associated Slices"),
- 'datasource_link': _("Data Source"),
- 'cluster': _("Cluster"),
- 'description': _("Description"),
- 'owner': _("Owner"),
- 'is_hidden': _("Is Hidden"),
- 'filter_select_enabled': _("Enable Filter Select"),
- 'default_endpoint': _("Default Endpoint"),
- 'offset': _("Time Offset"),
- 'cache_timeout': _("Cache Timeout"),
+ 'slices': _('Associated Slices'),
+ 'datasource_link': _('Data Source'),
+ 'cluster': _('Cluster'),
+ 'description': _('Description'),
+ 'owner': _('Owner'),
+ 'is_hidden': _('Is Hidden'),
+ 'filter_select_enabled': _('Enable Filter Select'),
+ 'default_endpoint': _('Default Endpoint'),
+ 'offset': _('Time Offset'),
+ 'cache_timeout': _('Cache Timeout'),
}
def pre_add(self, datasource):
@@ -263,18 +263,18 @@ def _delete(self, pk):
appbuilder.add_view(
DruidDatasourceModelView,
- "Druid Datasources",
- label=__("Druid Datasources"),
- category="Sources",
- category_label=__("Sources"),
- icon="fa-cube")
+ 'Druid Datasources',
+ label=__('Druid Datasources'),
+ category='Sources',
+ category_label=__('Sources'),
+ icon='fa-cube')
class Druid(BaseSupersetView):
"""The base views for Superset!"""
@has_access
- @expose("/refresh_datasources/")
+ @expose('/refresh_datasources/')
def refresh_datasources(self, refreshAll=True):
"""endpoint that refreshes druid datasources metadata"""
session = db.session()
@@ -287,19 +287,19 @@ def refresh_datasources(self, refreshAll=True):
flash(
"Error while processing cluster '{}'\n{}".format(
cluster_name, utils.error_msg_from_exception(e)),
- "danger")
+ 'danger')
logging.exception(e)
return redirect('/druidclustermodelview/list/')
cluster.metadata_last_refreshed = datetime.now()
flash(
- "Refreshed metadata from cluster "
- "[" + cluster.cluster_name + "]",
+ 'Refreshed metadata from cluster '
+ '[' + cluster.cluster_name + ']',
'info')
session.commit()
- return redirect("/druiddatasourcemodelview/list/")
+ return redirect('/druiddatasourcemodelview/list/')
@has_access
- @expose("/scan_new_datasources/")
+ @expose('/scan_new_datasources/')
def scan_new_datasources(self):
"""
Calling this endpoint will cause a scan for new
@@ -311,21 +311,21 @@ def scan_new_datasources(self):
appbuilder.add_view_no_menu(Druid)
appbuilder.add_link(
- "Scan New Datasources",
- label=__("Scan New Datasources"),
+ 'Scan New Datasources',
+ label=__('Scan New Datasources'),
href='/druid/scan_new_datasources/',
category='Sources',
- category_label=__("Sources"),
+ category_label=__('Sources'),
category_icon='fa-database',
- icon="fa-refresh")
+ icon='fa-refresh')
appbuilder.add_link(
- "Refresh Druid Metadata",
- label=__("Refresh Druid Metadata"),
+ 'Refresh Druid Metadata',
+ label=__('Refresh Druid Metadata'),
href='/druid/refresh_datasources/',
category='Sources',
- category_label=__("Sources"),
+ category_label=__('Sources'),
category_icon='fa-database',
- icon="fa-cog")
+ icon='fa-cog')
-appbuilder.add_separator("Sources", )
+appbuilder.add_separator('Sources', )
diff --git a/superset/connectors/sqla/models.py b/superset/connectors/sqla/models.py
index 8c70db31e606a..d7081d2809765 100644
--- a/superset/connectors/sqla/models.py
+++ b/superset/connectors/sqla/models.py
@@ -138,7 +138,7 @@ def sqla_col(self):
@property
def perm(self):
return (
- "{parent_name}.[{obj.metric_name}](id:{obj.id})"
+ '{parent_name}.[{obj.metric_name}](id:{obj.id})'
).format(obj=self,
parent_name=self.table.full_name) if self.table else None
@@ -155,7 +155,7 @@ class SqlaTable(Model, BaseDatasource):
"""An ORM object for SqlAlchemy table references"""
- type = "table"
+ type = 'table'
query_language = 'sql'
metric_class = SqlMetric
column_class = TableColumn
@@ -177,7 +177,7 @@ class SqlaTable(Model, BaseDatasource):
schema = Column(String(255))
sql = Column(Text)
- baselink = "tablemodelview"
+ baselink = 'tablemodelview'
export_fields = (
'table_name', 'main_dttm_col', 'description', 'default_endpoint',
'database_id', 'offset', 'cache_timeout', 'schema',
@@ -212,14 +212,14 @@ def schema_perm(self):
def get_perm(self):
return (
- "[{obj.database}].[{obj.table_name}]"
- "(id:{obj.id})").format(obj=self)
+ '[{obj.database}].[{obj.table_name}]'
+ '(id:{obj.id})').format(obj=self)
@property
def name(self):
if not self.schema:
return self.table_name
- return "{}.{}".format(self.schema, self.table_name)
+ return '{}.{}'.format(self.schema, self.table_name)
@property
def full_name(self):
@@ -251,18 +251,18 @@ def html(self):
return df.to_html(
index=False,
classes=(
- "dataframe table table-striped table-bordered "
- "table-condensed"))
+ 'dataframe table table-striped table-bordered '
+ 'table-condensed'))
@property
def sql_url(self):
- return self.database.sql_url + "?table_name=" + str(self.table_name)
+ return self.database.sql_url + '?table_name=' + str(self.table_name)
@property
def time_column_grains(self):
return {
- "time_columns": self.dttm_cols,
- "time_grains": [grain.name for grain in self.database.grains()],
+ 'time_columns': self.dttm_cols,
+ 'time_grains': [grain.name for grain in self.database.grains()],
}
def get_col(self, col_name):
@@ -304,9 +304,9 @@ def values_for_column(self, column_name, limit=10000):
qry = qry.where(tp.process_template(self.fetch_values_predicate))
engine = self.database.get_sqla_engine()
- sql = "{}".format(
+ sql = '{}'.format(
qry.compile(
- engine, compile_kwargs={"literal_binds": True}, ),
+ engine, compile_kwargs={'literal_binds': True}, ),
)
df = pd.read_sql_query(sql=sql, con=engine)
@@ -322,7 +322,7 @@ def get_query_str(self, query_obj):
sql = str(
qry.compile(
engine,
- compile_kwargs={"literal_binds": True},
+ compile_kwargs={'literal_binds': True},
),
)
logging.info(sql)
@@ -389,10 +389,10 @@ def get_sqla_query( # sqla
if not granularity and is_timeseries:
raise Exception(_(
- "Datetime column not provided as part table configuration "
- "and is required by this type of chart"))
+ 'Datetime column not provided as part of the table configuration '
+ 'and is required by this type of chart'))
if not groupby and not metrics and not columns:
- raise Exception(_("Empty query?"))
+ raise Exception(_('Empty query?'))
for m in metrics:
if m not in metrics_dict:
raise Exception(_("Metric '{}' is not valid".format(m)))
@@ -400,7 +400,7 @@ def get_sqla_query( # sqla
if metrics_exprs:
main_metric_expr = metrics_exprs[0]
else:
- main_metric_expr = literal_column("COUNT(*)").label("ccount")
+ main_metric_expr = literal_column('COUNT(*)').label('ccount')
select_exprs = []
groupby_exprs = []
@@ -465,7 +465,7 @@ def get_sqla_query( # sqla
# For backwards compatibility and edge cases
# where a column data type might have changed
if isinstance(v, basestring):
v = v.strip("'").strip('"')
if col_obj.is_num:
v = utils.string_to_num(v)
@@ -600,9 +600,9 @@ def fetch_metadata(self):
try:
datatype = col.type.compile(dialect=db_dialect).upper()
except Exception as e:
- datatype = "UNKNOWN"
+ datatype = 'UNKNOWN'
logging.error(
- "Unrecognized data type in {}.{}".format(table, col.name))
+ 'Unrecognized data type in {}.{}'.format(table, col.name))
logging.exception(e)
dbcol = dbcols.get(col.name, None)
if not dbcol:
@@ -622,35 +622,35 @@ def fetch_metadata(self):
metric_name='sum__' + dbcol.column_name,
verbose_name='sum__' + dbcol.column_name,
metric_type='sum',
- expression="SUM({})".format(quoted),
+ expression='SUM({})'.format(quoted),
))
if dbcol.avg:
metrics.append(M(
metric_name='avg__' + dbcol.column_name,
verbose_name='avg__' + dbcol.column_name,
metric_type='avg',
- expression="AVG({})".format(quoted),
+ expression='AVG({})'.format(quoted),
))
if dbcol.max:
metrics.append(M(
metric_name='max__' + dbcol.column_name,
verbose_name='max__' + dbcol.column_name,
metric_type='max',
- expression="MAX({})".format(quoted),
+ expression='MAX({})'.format(quoted),
))
if dbcol.min:
metrics.append(M(
metric_name='min__' + dbcol.column_name,
verbose_name='min__' + dbcol.column_name,
metric_type='min',
- expression="MIN({})".format(quoted),
+ expression='MIN({})'.format(quoted),
))
if dbcol.count_distinct:
metrics.append(M(
metric_name='count_distinct__' + dbcol.column_name,
verbose_name='count_distinct__' + dbcol.column_name,
metric_type='count_distinct',
- expression="COUNT(DISTINCT {})".format(quoted),
+ expression='COUNT(DISTINCT {})'.format(quoted),
))
dbcol.type = datatype
@@ -658,7 +658,7 @@ def fetch_metadata(self):
metric_name='count',
verbose_name='COUNT(*)',
metric_type='count',
- expression="COUNT(*)",
+ expression='COUNT(*)',
))
dbmetrics = db.session.query(M).filter(M.table_id == self.id).filter(
diff --git a/superset/connectors/sqla/views.py b/superset/connectors/sqla/views.py
index 586c776368a10..47358fa3353b0 100644
--- a/superset/connectors/sqla/views.py
+++ b/superset/connectors/sqla/views.py
@@ -39,54 +39,54 @@ class TableColumnInlineView(CompactCRUDMixin, SupersetModelView): # noqa
page_size = 500
description_columns = {
'is_dttm': _(
- "Whether to make this column available as a "
- "[Time Granularity] option, column has to be DATETIME or "
- "DATETIME-like"),
+ 'Whether to make this column available as a '
+ '[Time Granularity] option, column has to be DATETIME or '
+ 'DATETIME-like'),
'filterable': _(
- "Whether this column is exposed in the `Filters` section "
- "of the explore view."),
+ 'Whether this column is exposed in the `Filters` section '
+ 'of the explore view.'),
'type': _(
- "The data type that was inferred by the database. "
- "It may be necessary to input a type manually for "
- "expression-defined columns in some cases. In most case "
- "users should not need to alter this."),
+ 'The data type that was inferred by the database. '
+ 'It may be necessary to input a type manually for '
+ 'expression-defined columns in some cases. In most cases '
+ 'users should not need to alter this.'),
'expression': utils.markdown(
- "a valid SQL expression as supported by the underlying backend. "
- "Example: `substr(name, 1, 1)`", True),
+ 'a valid SQL expression as supported by the underlying backend. '
+ 'Example: `substr(name, 1, 1)`', True),
'python_date_format': utils.markdown(Markup(
- "The pattern of timestamp format, use "
- ""
- "python datetime string pattern "
- "expression. If time is stored in epoch "
- "format, put `epoch_s` or `epoch_ms`. Leave `Database Expression` "
- "below empty if timestamp is stored in "
- "String or Integer(epoch) type"), True),
+ 'The pattern of timestamp format, use '
+ ''
+ 'python datetime string pattern '
+ 'expression. If time is stored in epoch '
+ 'format, put `epoch_s` or `epoch_ms`. Leave `Database Expression` '
+ 'below empty if timestamp is stored in '
+ 'String or Integer(epoch) type'), True),
'database_expression': utils.markdown(
- "The database expression to cast internal datetime "
- "constants to database date/timestamp type according to the DBAPI. "
- "The expression should follow the pattern of "
- "%Y-%m-%d %H:%M:%S, based on different DBAPI. "
- "The string should be a python string formatter \n"
- "`Ex: TO_DATE('{}', 'YYYY-MM-DD HH24:MI:SS')` for Oracle"
- "Superset uses default expression based on DB URI if this "
- "field is blank.", True),
+ 'The database expression to cast internal datetime '
+ 'constants to database date/timestamp type according to the DBAPI. '
+ 'The expression should follow the pattern of '
+ '%Y-%m-%d %H:%M:%S, based on different DBAPI. '
+ 'The string should be a python string formatter \n'
+ "`Ex: TO_DATE('{}', 'YYYY-MM-DD HH24:MI:SS')` for Oracle "
+ 'Superset uses default expression based on DB URI if this '
+ 'field is blank.', True),
}
label_columns = {
- 'column_name': _("Column"),
- 'verbose_name': _("Verbose Name"),
- 'description': _("Description"),
- 'groupby': _("Groupable"),
- 'filterable': _("Filterable"),
- 'table': _("Table"),
- 'count_distinct': _("Count Distinct"),
- 'sum': _("Sum"),
- 'min': _("Min"),
- 'max': _("Max"),
- 'expression': _("Expression"),
- 'is_dttm': _("Is temporal"),
- 'python_date_format': _("Datetime Format"),
- 'database_expression': _("Database Expression"),
+ 'column_name': _('Column'),
+ 'verbose_name': _('Verbose Name'),
+ 'description': _('Description'),
+ 'groupby': _('Groupable'),
+ 'filterable': _('Filterable'),
+ 'table': _('Table'),
+ 'count_distinct': _('Count Distinct'),
+ 'sum': _('Sum'),
+ 'min': _('Min'),
+ 'max': _('Max'),
+ 'expression': _('Expression'),
+ 'is_dttm': _('Is temporal'),
+ 'python_date_format': _('Datetime Format'),
+ 'database_expression': _('Database Expression'),
'type': _('Type'),
}
@@ -108,30 +108,30 @@ class SqlMetricInlineView(CompactCRUDMixin, SupersetModelView): # noqa
'expression', 'table', 'd3format', 'is_restricted', 'warning_text']
description_columns = {
'expression': utils.markdown(
- "a valid SQL expression as supported by the underlying backend. "
- "Example: `count(DISTINCT userid)`", True),
- 'is_restricted': _("Whether the access to this metric is restricted "
- "to certain roles. Only roles with the permission "
+ 'a valid SQL expression as supported by the underlying backend. '
+ 'Example: `count(DISTINCT userid)`', True),
+ 'is_restricted': _('Whether the access to this metric is restricted '
+ 'to certain roles. Only roles with the permission '
"'metric access on XXX (the name of this metric)' "
- "are allowed to access this metric"),
+ 'are allowed to access this metric'),
'd3format': utils.markdown(
- "d3 formatting string as defined [here]"
- "(https://github.com/d3/d3-format/blob/master/README.md#format). "
- "For instance, this default formatting applies in the Table "
- "visualization and allow for different metric to use different "
- "formats", True,
+ 'd3 formatting string as defined [here]'
+ '(https://github.com/d3/d3-format/blob/master/README.md#format). '
+ 'For instance, this default formatting applies in the Table '
+ 'visualization and allows for different metrics to use different '
+ 'formats', True,
),
}
add_columns = edit_columns
page_size = 500
label_columns = {
- 'metric_name': _("Metric"),
- 'description': _("Description"),
- 'verbose_name': _("Verbose Name"),
- 'metric_type': _("Type"),
- 'expression': _("SQL Expression"),
- 'table': _("Table"),
- 'd3format': _("D3 Format"),
+ 'metric_name': _('Metric'),
+ 'description': _('Description'),
+ 'verbose_name': _('Verbose Name'),
+ 'metric_type': _('Type'),
+ 'expression': _('SQL Expression'),
+ 'table': _('Table'),
+ 'd3format': _('D3 Format'),
'is_restricted': _('Is Restricted'),
'warning_text': _('Warning Message'),
}
@@ -174,56 +174,56 @@ class TableModelView(DatasourceModelView, DeleteMixin): # noqa
)
description_columns = {
'slices': _(
- "The list of slices associated with this table. By "
- "altering this datasource, you may change how these associated "
- "slices behave. "
- "Also note that slices need to point to a datasource, so "
- "this form will fail at saving if removing slices from a "
- "datasource. If you want to change the datasource for a slice, "
+ 'The list of slices associated with this table. By '
+ 'altering this datasource, you may change how these associated '
+ 'slices behave. '
+ 'Also note that slices need to point to a datasource, so '
+ 'this form will fail at saving if removing slices from a '
+ 'datasource. If you want to change the datasource for a slice, '
"overwrite the slice from the 'explore view'"),
- 'offset': _("Timezone offset (in hours) for this datasource"),
+ 'offset': _('Timezone offset (in hours) for this datasource'),
'table_name': _(
- "Name of the table that exists in the source database"),
+ 'Name of the table that exists in the source database'),
'schema': _(
- "Schema, as used only in some databases like Postgres, Redshift "
- "and DB2"),
+ 'Schema, as used only in some databases like Postgres, Redshift '
+ 'and DB2'),
'description': Markup(
- "Supports "
- "markdown"),
+ 'Supports '
+ 'markdown'),
'sql': _(
- "This fields acts a Superset view, meaning that Superset will "
- "run a query against this string as a subquery.",
+ 'This field acts as a Superset view, meaning that Superset will '
+ 'run a query against this string as a subquery.',
),
'fetch_values_predicate': _(
- "Predicate applied when fetching distinct value to "
- "populate the filter control component. Supports "
- "jinja template syntax. Applies only when "
- "`Enable Filter Select` is on.",
+ 'Predicate applied when fetching distinct value to '
+ 'populate the filter control component. Supports '
+ 'jinja template syntax. Applies only when '
+ '`Enable Filter Select` is on.',
),
'default_endpoint': _(
- "Redirects to this endpoint when clicking on the table "
- "from the table list"),
+ 'Redirects to this endpoint when clicking on the table '
+ 'from the table list'),
'filter_select_enabled': _(
"Whether to populate the filter's dropdown in the explore "
"view's filter section with a list of distinct values fetched "
- "from the backend on the fly"),
+ 'from the backend on the fly'),
}
base_filters = [['id', DatasourceFilter, lambda: []]]
label_columns = {
- 'slices': _("Associated Slices"),
- 'link': _("Table"),
- 'changed_by_': _("Changed By"),
- 'database': _("Database"),
- 'changed_on_': _("Last Changed"),
- 'filter_select_enabled': _("Enable Filter Select"),
- 'schema': _("Schema"),
+ 'slices': _('Associated Slices'),
+ 'link': _('Table'),
+ 'changed_by_': _('Changed By'),
+ 'database': _('Database'),
+ 'changed_on_': _('Last Changed'),
+ 'filter_select_enabled': _('Enable Filter Select'),
+ 'schema': _('Schema'),
'default_endpoint': _('Default Endpoint'),
- 'offset': _("Offset"),
- 'cache_timeout': _("Cache Timeout"),
- 'table_name': _("Table Name"),
+ 'offset': _('Offset'),
+ 'cache_timeout': _('Cache Timeout'),
+ 'table_name': _('Table Name'),
'fetch_values_predicate': _('Fetch Values Predicate'),
- 'owner': _("Owner"),
- 'main_dttm_col': _("Main Datetime Column"),
+ 'owner': _('Owner'),
+ 'main_dttm_col': _('Main Datetime Column'),
'description': _('Description'),
}
@@ -240,10 +240,10 @@ def pre_add(self, table):
# Fail before adding if the table can't be found
if not table.database.has_table(table):
raise Exception(_(
- "Table [{}] could not be found, "
- "please double check your "
- "database connection, schema, and "
- "table name").format(table.name))
+ 'Table [{}] could not be found, '
+ 'please double check your '
+ 'database connection, schema, and '
+ 'table name').format(table.name))
def post_add(self, table, flash_message=True):
table.fetch_metadata()
@@ -253,10 +253,10 @@ def post_add(self, table, flash_message=True):
if flash_message:
flash(_(
- "The table was created. "
- "As part of this two phase configuration "
- "process, you should now click the edit button by "
- "the new table to configure it."), "info")
+ 'The table was created. '
+ 'As part of this two phase configuration '
+ 'process, you should now click the edit button by '
+ 'the new table to configure it.'), 'info')
def post_update(self, table):
self.post_add(table, flash_message=False)
@@ -274,26 +274,26 @@ def edit(self, pk):
return redirect('/superset/explore/table/{}/'.format(pk))
@action(
- "refresh",
- __("Refresh Metadata"),
- __("Refresh column metadata"),
- "fa-refresh")
+ 'refresh',
+ __('Refresh Metadata'),
+ __('Refresh column metadata'),
+ 'fa-refresh')
def refresh(self, tables):
for t in tables:
t.fetch_metadata()
msg = _(
- "Metadata refreshed for the following table(s): %(tables)s",
- tables=", ".join([t.table_name for t in tables]))
+ 'Metadata refreshed for the following table(s): %(tables)s',
+ tables=', '.join([t.table_name for t in tables]))
flash(msg, 'info')
return redirect('/tablemodelview/list/')
appbuilder.add_view(
TableModelView,
- "Tables",
- label=__("Tables"),
- category="Sources",
- category_label=__("Sources"),
+ 'Tables',
+ label=__('Tables'),
+ category='Sources',
+ category_label=__('Sources'),
icon='fa-table',)
-appbuilder.add_separator("Sources")
+appbuilder.add_separator('Sources')
diff --git a/superset/db_engine_specs.py b/superset/db_engine_specs.py
index 136b6594a2d4d..23254f22c363e 100644
--- a/superset/db_engine_specs.py
+++ b/superset/db_engine_specs.py
@@ -106,7 +106,7 @@ def fetch_result_sets(cls, db, datasource_type, force=False):
all_result_sets += [
'{}.{}'.format(schema, t) for t in result_sets[schema]]
if all_result_sets:
- result_sets[""] = all_result_sets
+ result_sets[''] = all_result_sets
return result_sets
@classmethod
@@ -139,7 +139,7 @@ def adjust_database_uri(cls, uri, selected_schema):
For those it's probably better to not alter the database
component of the URI with the schema name, it won't work.
- Some database drivers like presto accept "{catalog}/{schema}" in
+ Some database drivers like presto accept '{catalog}/{schema}' in
the database component of the URL, that can be handled here.
"""
return uri
@@ -211,15 +211,15 @@ class PostgresEngineSpec(BaseEngineSpec):
engine = 'postgresql'
time_grains = (
- Grain("Time Column", _('Time Column'), "{col}"),
- Grain("second", _('second'), "DATE_TRUNC('second', \"{col}\")"),
- Grain("minute", _('minute'), "DATE_TRUNC('minute', \"{col}\")"),
- Grain("hour", _('hour'), "DATE_TRUNC('hour', \"{col}\")"),
- Grain("day", _('day'), "DATE_TRUNC('day', \"{col}\")"),
- Grain("week", _('week'), "DATE_TRUNC('week', \"{col}\")"),
- Grain("month", _('month'), "DATE_TRUNC('month', \"{col}\")"),
- Grain("quarter", _('quarter'), "DATE_TRUNC('quarter', \"{col}\")"),
- Grain("year", _('year'), "DATE_TRUNC('year', \"{col}\")"),
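+ # the escaped double quotes keep {col} quoted as a Postgres identifier inside the DATE_TRUNC expressions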
+ Grain('Time Column', _('Time Column'), '{col}'),
+ Grain('second', _('second'), "DATE_TRUNC('second', \"{col}\")"),
+ Grain('minute', _('minute'), "DATE_TRUNC('minute', \"{col}\")"),
+ Grain('hour', _('hour'), "DATE_TRUNC('hour', \"{col}\")"),
+ Grain('day', _('day'), "DATE_TRUNC('day', \"{col}\")"),
+ Grain('week', _('week'), "DATE_TRUNC('week', \"{col}\")"),
+ Grain('month', _('month'), "DATE_TRUNC('month', \"{col}\")"),
+ Grain('quarter', _('quarter'), "DATE_TRUNC('quarter', \"{col}\")"),
+ Grain('year', _('year'), "DATE_TRUNC('year', \"{col}\")"),
)
@classmethod
@@ -288,9 +288,9 @@ class SqliteEngineSpec(BaseEngineSpec):
time_grains = (
Grain('Time Column', _('Time Column'), '{col}'),
Grain('day', _('day'), 'DATE({col})'),
- Grain("week", _('week'),
+ Grain('week', _('week'),
"DATE({col}, -strftime('%w', {col}) || ' days')"),
- Grain("month", _('month'),
+ Grain('month', _('month'),
"DATE({col}, -strftime('%d', {col}) || ' days', '+1 day')"),
)
@@ -314,7 +314,7 @@ def fetch_result_sets(cls, db, datasource_type, force=False):
all_result_sets += [
'{}.{}'.format(schema, t) for t in result_sets[schema]]
if all_result_sets:
- result_sets[""] = all_result_sets
+ result_sets[''] = all_result_sets
return result_sets
@classmethod
@@ -335,25 +335,25 @@ class MySQLEngineSpec(BaseEngineSpec):
cursor_execute_kwargs = {'args': {}}
time_grains = (
Grain('Time Column', _('Time Column'), '{col}'),
- Grain("second", _('second'), "DATE_ADD(DATE({col}), "
- "INTERVAL (HOUR({col})*60*60 + MINUTE({col})*60"
- " + SECOND({col})) SECOND)"),
- Grain("minute", _('minute'), "DATE_ADD(DATE({col}), "
- "INTERVAL (HOUR({col})*60 + MINUTE({col})) MINUTE)"),
- Grain("hour", _('hour'), "DATE_ADD(DATE({col}), "
- "INTERVAL HOUR({col}) HOUR)"),
+ Grain('second', _('second'), 'DATE_ADD(DATE({col}), '
+ 'INTERVAL (HOUR({col})*60*60 + MINUTE({col})*60'
+ ' + SECOND({col})) SECOND)'),
+ Grain('minute', _('minute'), 'DATE_ADD(DATE({col}), '
+ 'INTERVAL (HOUR({col})*60 + MINUTE({col})) MINUTE)'),
+ Grain('hour', _('hour'), 'DATE_ADD(DATE({col}), '
+ 'INTERVAL HOUR({col}) HOUR)'),
Grain('day', _('day'), 'DATE({col})'),
- Grain("week", _('week'), "DATE(DATE_SUB({col}, "
- "INTERVAL DAYOFWEEK({col}) - 1 DAY))"),
- Grain("month", _('month'), "DATE(DATE_SUB({col}, "
- "INTERVAL DAYOFMONTH({col}) - 1 DAY))"),
- Grain("quarter", _('quarter'), "MAKEDATE(YEAR({col}), 1) "
- "+ INTERVAL QUARTER({col}) QUARTER - INTERVAL 1 QUARTER"),
- Grain("year", _('year'), "DATE(DATE_SUB({col}, "
- "INTERVAL DAYOFYEAR({col}) - 1 DAY))"),
- Grain("week_start_monday", _('week_start_monday'),
- "DATE(DATE_SUB({col}, "
- "INTERVAL DAYOFWEEK(DATE_SUB({col}, INTERVAL 1 DAY)) - 1 DAY))"),
+ Grain('week', _('week'), 'DATE(DATE_SUB({col}, '
+ 'INTERVAL DAYOFWEEK({col}) - 1 DAY))'),
+ Grain('month', _('month'), 'DATE(DATE_SUB({col}, '
+ 'INTERVAL DAYOFMONTH({col}) - 1 DAY))'),
+ Grain('quarter', _('quarter'), 'MAKEDATE(YEAR({col}), 1) '
+ '+ INTERVAL QUARTER({col}) QUARTER - INTERVAL 1 QUARTER'),
+ Grain('year', _('year'), 'DATE(DATE_SUB({col}, '
+ 'INTERVAL DAYOFYEAR({col}) - 1 DAY))'),
+ Grain('week_start_monday', _('week_start_monday'),
+ 'DATE(DATE_SUB({col}, '
+ 'INTERVAL DAYOFWEEK(DATE_SUB({col}, INTERVAL 1 DAY)) - 1 DAY))'),
)
@classmethod
@@ -371,7 +371,7 @@ def adjust_database_uri(cls, uri, selected_schema=None):
@classmethod
def epoch_to_dttm(cls):
- return "from_unixtime({col})"
+ return 'from_unixtime({col})'
@classmethod
def extract_error_message(cls, e):
@@ -405,10 +405,10 @@ class PrestoEngineSpec(BaseEngineSpec):
"date_trunc('month', CAST({col} AS TIMESTAMP))"),
Grain('quarter', _('quarter'),
"date_trunc('quarter', CAST({col} AS TIMESTAMP))"),
- Grain("week_ending_saturday", _('week_ending_saturday'),
+ Grain('week_ending_saturday', _('week_ending_saturday'),
"date_add('day', 5, date_trunc('week', date_add('day', 1, "
- "CAST({col} AS TIMESTAMP))))"),
- Grain("week_start_sunday", _('week_start_sunday'),
+ 'CAST({col} AS TIMESTAMP))))'),
+ Grain('week_start_sunday', _('week_start_sunday'),
"date_add('day', -1, date_trunc('week', "
"date_add('day', 1, CAST({col} AS TIMESTAMP))))"),
)
@@ -432,7 +432,7 @@ def adjust_database_uri(cls, uri, selected_schema=None):
@classmethod
def escape_sql(cls, sql):
- return re.sub(r'%%|%', "%%", sql)
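+ # double up '%' so the DBAPI does not treat it as a parameter marker ('%%' is left as-is)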
+ return re.sub(r'%%|%', '%%', sql)
@classmethod
def convert_dttm(cls, target_type, dttm):
@@ -445,7 +445,7 @@ def convert_dttm(cls, target_type, dttm):
@classmethod
def epoch_to_dttm(cls):
- return "from_unixtime({col})"
+ return 'from_unixtime({col})'
@classmethod
@cache_util.memoized_func(
@@ -467,7 +467,7 @@ def fetch_result_sets(cls, db, datasource_type, force=False):
result_sets = defaultdict(list)
for unused, row in result_set_df.iterrows():
result_sets[row['table_schema']].append(row['table_name'])
- result_sets[""].append('{}.{}'.format(
+ result_sets[''].append('{}.{}'.format(
row['table_schema'], row['table_name']))
return result_sets
@@ -479,7 +479,7 @@ def extra_table_metadata(cls, database, table_name, schema_name):
cols = indexes[0].get('column_names', [])
full_table_name = table_name
if schema_name and '.' not in table_name:
- full_table_name = "{}.{}".format(schema_name, table_name)
+ full_table_name = '{}.{}'.format(schema_name, table_name)
pql = cls._partition_query(full_table_name)
col_name, latest_part = cls.latest_partition(
table_name, schema_name, database, show_first=True)
@@ -554,7 +554,7 @@ def _partition_query(
:param filters: a list of filters to apply
:param filters: dict of field name and filter value combinations
"""
- limit_clause = "LIMIT {}".format(limit) if limit else ''
+ limit_clause = 'LIMIT {}'.format(limit) if limit else ''
order_by_clause = ''
if order_by:
l = [] # noqa: E741
@@ -603,12 +603,12 @@ def latest_partition(cls, table_name, schema, database, show_first=False):
indexes = database.get_indexes(table_name, schema)
if len(indexes[0]['column_names']) < 1:
raise SupersetTemplateException(
- "The table should have one partitioned field")
+ 'The table should have one partitioned field')
elif not show_first and len(indexes[0]['column_names']) > 1:
raise SupersetTemplateException(
- "The table should have a single partitioned field "
- "to use this function. You may want to use "
- "`presto.latest_sub_partition`")
+ 'The table should have a single partitioned field '
+ 'to use this function. You may want to use '
+ '`presto.latest_sub_partition`')
part_field = indexes[0]['column_names'][0]
sql = cls._partition_query(table_name, 1, [(part_field, True)])
df = database.get_df(sql, schema)
@@ -645,12 +645,12 @@ def latest_sub_partition(cls, table_name, schema, database, **kwargs):
part_fields = indexes[0]['column_names']
for k in kwargs.keys():
if k not in k in part_fields:
- msg = "Field [{k}] is not part of the portioning key"
+                msg = 'Field [{k}] is not part of the partitioning key'
raise SupersetTemplateException(msg)
if len(kwargs.keys()) != len(part_fields) - 1:
msg = (
- "A filter needs to be specified for {} out of the "
- "{} fields."
+ 'A filter needs to be specified for {} out of the '
+ '{} fields.'
).format(len(part_fields) - 1, len(part_fields))
raise SupersetTemplateException(msg)
@@ -755,9 +755,9 @@ def progress(cls, log_lines):
reduce_progress = int(match.groupdict()['reduce_progress'])
stages[stage_number] = (map_progress + reduce_progress) / 2
logging.info(
- "Progress detail: {}, "
- "current job {}, "
- "total jobs: {}".format(stages, current_job, total_jobs))
+ 'Progress detail: {}, '
+ 'current job {}, '
+ 'total jobs: {}'.format(stages, current_job, total_jobs))
stage_progress = sum(
stages.values()) / len(stages.values()) if stages else 0
@@ -769,7 +769,7 @@ def progress(cls, log_lines):
@classmethod
def get_tracking_url(cls, log_lines):
- lkp = "Tracking URL = "
+ lkp = 'Tracking URL = '
for line in log_lines:
if lkp in line:
return line.split(lkp)[1]
@@ -796,7 +796,7 @@ def handle_cursor(cls, cursor, query, session):
if log:
log_lines = log.splitlines()
progress = cls.progress(log_lines)
- logging.info("Progress total: {}".format(progress))
+ logging.info('Progress total: {}'.format(progress))
needs_commit = False
if progress > query.progress:
query.progress = progress
@@ -806,19 +806,19 @@ def handle_cursor(cls, cursor, query, session):
if tracking_url:
job_id = tracking_url.split('/')[-2]
logging.info(
- "Found the tracking url: {}".format(tracking_url))
+ 'Found the tracking url: {}'.format(tracking_url))
tracking_url = tracking_url_trans(tracking_url)
logging.info(
- "Transformation applied: {}".format(tracking_url))
+ 'Transformation applied: {}'.format(tracking_url))
query.tracking_url = tracking_url
- logging.info("Job id: {}".format(job_id))
+ logging.info('Job id: {}'.format(job_id))
needs_commit = True
if job_id and len(log_lines) > last_log_line:
# Wait for job id before logging things out
# this allows for prefixing all log lines and becoming
# searchable in something like Kibana
for l in log_lines[last_log_line:]:
- logging.info("[{}] {}".format(job_id, l))
+ logging.info('[{}] {}'.format(job_id, l))
last_log_line = len(log_lines)
if needs_commit:
session.commit()
@@ -852,7 +852,7 @@ def _latest_partition_from_df(cls, df):
@classmethod
def _partition_query(
cls, table_name, limit=0, order_by=None, filters=None):
- return "SHOW PARTITIONS {table_name}".format(**locals())
+ return 'SHOW PARTITIONS {table_name}'.format(**locals())
@classmethod
def modify_url_for_impersonation(cls, url, impersonate_user, username):
@@ -881,9 +881,9 @@ def get_configuration_for_impersonation(cls, uri, impersonate_user, username):
backend_name = url.get_backend_name()
# Must be Hive connection, enable impersonation, and set param auth=LDAP|KERBEROS
- if (backend_name == "hive" and "auth" in url.query.keys() and
+ if (backend_name == 'hive' and 'auth' in url.query.keys() and
impersonate_user is True and username is not None):
- configuration["hive.server2.proxy.user"] = username
+ configuration['hive.server2.proxy.user'] = username
return configuration
@@ -892,27 +892,27 @@ class MssqlEngineSpec(BaseEngineSpec):
epoch_to_dttm = "dateadd(S, {col}, '1970-01-01')"
time_grains = (
- Grain("Time Column", _('Time Column'), "{col}"),
- Grain("second", _('second'), "DATEADD(second, "
+ Grain('Time Column', _('Time Column'), '{col}'),
+ Grain('second', _('second'), 'DATEADD(second, '
"DATEDIFF(second, '2000-01-01', {col}), '2000-01-01')"),
- Grain("minute", _('minute'), "DATEADD(minute, "
- "DATEDIFF(minute, 0, {col}), 0)"),
- Grain("5 minute", _('5 minute'), "DATEADD(minute, "
- "DATEDIFF(minute, 0, {col}) / 5 * 5, 0)"),
- Grain("half hour", _('half hour'), "DATEADD(minute, "
- "DATEDIFF(minute, 0, {col}) / 30 * 30, 0)"),
- Grain("hour", _('hour'), "DATEADD(hour, "
- "DATEDIFF(hour, 0, {col}), 0)"),
- Grain("day", _('day'), "DATEADD(day, "
- "DATEDIFF(day, 0, {col}), 0)"),
- Grain("week", _('week'), "DATEADD(week, "
- "DATEDIFF(week, 0, {col}), 0)"),
- Grain("month", _('month'), "DATEADD(month, "
- "DATEDIFF(month, 0, {col}), 0)"),
- Grain("quarter", _('quarter'), "DATEADD(quarter, "
- "DATEDIFF(quarter, 0, {col}), 0)"),
- Grain("year", _('year'), "DATEADD(year, "
- "DATEDIFF(year, 0, {col}), 0)"),
+ Grain('minute', _('minute'), 'DATEADD(minute, '
+ 'DATEDIFF(minute, 0, {col}), 0)'),
+ Grain('5 minute', _('5 minute'), 'DATEADD(minute, '
+ 'DATEDIFF(minute, 0, {col}) / 5 * 5, 0)'),
+ Grain('half hour', _('half hour'), 'DATEADD(minute, '
+ 'DATEDIFF(minute, 0, {col}) / 30 * 30, 0)'),
+ Grain('hour', _('hour'), 'DATEADD(hour, '
+ 'DATEDIFF(hour, 0, {col}), 0)'),
+ Grain('day', _('day'), 'DATEADD(day, '
+ 'DATEDIFF(day, 0, {col}), 0)'),
+ Grain('week', _('week'), 'DATEADD(week, '
+ 'DATEDIFF(week, 0, {col}), 0)'),
+ Grain('month', _('month'), 'DATEADD(month, '
+ 'DATEDIFF(month, 0, {col}), 0)'),
+ Grain('quarter', _('quarter'), 'DATEADD(quarter, '
+ 'DATEDIFF(quarter, 0, {col}), 0)'),
+ Grain('year', _('year'), 'DATEADD(year, '
+ 'DATEDIFF(year, 0, {col}), 0)'),
)
@classmethod
@@ -948,7 +948,7 @@ class OracleEngineSpec(PostgresEngineSpec):
@classmethod
def convert_dttm(cls, target_type, dttm):
return (
            """TO_TIMESTAMP('{}', 'YYYY-MM-DD"T"HH24:MI:SS.ff6')"""
).format(dttm.isoformat())
@@ -975,10 +975,10 @@ class AthenaEngineSpec(BaseEngineSpec):
"date_trunc('month', CAST({col} AS TIMESTAMP))"),
Grain('quarter', _('quarter'),
"date_trunc('quarter', CAST({col} AS TIMESTAMP))"),
- Grain("week_ending_saturday", _('week_ending_saturday'),
+ Grain('week_ending_saturday', _('week_ending_saturday'),
"date_add('day', 5, date_trunc('week', date_add('day', 1, "
- "CAST({col} AS TIMESTAMP))))"),
- Grain("week_start_sunday", _('week_start_sunday'),
+ 'CAST({col} AS TIMESTAMP))))'),
+ Grain('week_start_sunday', _('week_start_sunday'),
"date_add('day', -1, date_trunc('week', "
"date_add('day', 1, CAST({col} AS TIMESTAMP))))"),
)
@@ -995,7 +995,7 @@ def convert_dttm(cls, target_type, dttm):
@classmethod
def epoch_to_dttm(cls):
- return "from_unixtime({col})"
+ return 'from_unixtime({col})'
class ClickHouseEngineSpec(BaseEngineSpec):
@@ -1008,21 +1008,21 @@ class ClickHouseEngineSpec(BaseEngineSpec):
time_grains = (
Grain('Time Column', _('Time Column'), '{col}'),
Grain('minute', _('minute'),
- "toStartOfMinute(toDateTime({col}))"),
+ 'toStartOfMinute(toDateTime({col}))'),
Grain('5 minute', _('5 minute'),
- "toDateTime(intDiv(toUInt32(toDateTime({col})), 300)*300)"),
+ 'toDateTime(intDiv(toUInt32(toDateTime({col})), 300)*300)'),
Grain('10 minute', _('10 minute'),
- "toDateTime(intDiv(toUInt32(toDateTime({col})), 600)*600)"),
+ 'toDateTime(intDiv(toUInt32(toDateTime({col})), 600)*600)'),
Grain('hour', _('hour'),
- "toStartOfHour(toDateTime({col}))"),
+ 'toStartOfHour(toDateTime({col}))'),
Grain('day', _('day'),
- "toStartOfDay(toDateTime({col}))"),
+ 'toStartOfDay(toDateTime({col}))'),
Grain('month', _('month'),
- "toStartOfMonth(toDateTime({col}))"),
+ 'toStartOfMonth(toDateTime({col}))'),
Grain('quarter', _('quarter'),
- "toStartOfQuarter(toDateTime({col}))"),
+ 'toStartOfQuarter(toDateTime({col}))'),
Grain('year', _('year'),
- "toStartOfYear(toDateTime({col}))"),
+ 'toStartOfYear(toDateTime({col}))'),
)
@classmethod
@@ -1043,22 +1043,22 @@ class BQEngineSpec(BaseEngineSpec):
engine = 'bigquery'
time_grains = (
- Grain("Time Column", _('Time Column'), "{col}"),
- Grain("second", _('second'), "TIMESTAMP_TRUNC({col}, SECOND)"),
- Grain("minute", _('minute'), "TIMESTAMP_TRUNC({col}, MINUTE)"),
- Grain("hour", _('hour'), "TIMESTAMP_TRUNC({col}, HOUR)"),
- Grain("day", _('day'), "TIMESTAMP_TRUNC({col}, DAY)"),
- Grain("week", _('week'), "TIMESTAMP_TRUNC({col}, WEEK)"),
- Grain("month", _('month'), "TIMESTAMP_TRUNC({col}, MONTH)"),
- Grain("quarter", _('quarter'), "TIMESTAMP_TRUNC({col}, QUARTER)"),
- Grain("year", _('year'), "TIMESTAMP_TRUNC({col}, YEAR)"),
+ Grain('Time Column', _('Time Column'), '{col}'),
+ Grain('second', _('second'), 'TIMESTAMP_TRUNC({col}, SECOND)'),
+ Grain('minute', _('minute'), 'TIMESTAMP_TRUNC({col}, MINUTE)'),
+ Grain('hour', _('hour'), 'TIMESTAMP_TRUNC({col}, HOUR)'),
+ Grain('day', _('day'), 'TIMESTAMP_TRUNC({col}, DAY)'),
+ Grain('week', _('week'), 'TIMESTAMP_TRUNC({col}, WEEK)'),
+ Grain('month', _('month'), 'TIMESTAMP_TRUNC({col}, MONTH)'),
+ Grain('quarter', _('quarter'), 'TIMESTAMP_TRUNC({col}, QUARTER)'),
+ Grain('year', _('year'), 'TIMESTAMP_TRUNC({col}, YEAR)'),
)
@classmethod
def convert_dttm(cls, target_type, dttm):
tt = target_type.upper()
if tt == 'DATE':
            return "'{}'".format(dttm.strftime('%Y-%m-%d'))
return "'{}'".format(dttm.strftime('%Y-%m-%d %H:%M:%S'))
@@ -1068,21 +1068,21 @@ class ImpalaEngineSpec(BaseEngineSpec):
engine = 'impala'
time_grains = (
- Grain("Time Column", _('Time Column'), "{col}"),
- Grain("minute", _('minute'), "TRUNC({col}, 'MI')"),
- Grain("hour", _('hour'), "TRUNC({col}, 'HH')"),
- Grain("day", _('day'), "TRUNC({col}, 'DD')"),
- Grain("week", _('week'), "TRUNC({col}, 'WW')"),
- Grain("month", _('month'), "TRUNC({col}, 'MONTH')"),
- Grain("quarter", _('quarter'), "TRUNC({col}, 'Q')"),
- Grain("year", _('year'), "TRUNC({col}, 'YYYY')"),
+ Grain('Time Column', _('Time Column'), '{col}'),
+ Grain('minute', _('minute'), "TRUNC({col}, 'MI')"),
+ Grain('hour', _('hour'), "TRUNC({col}, 'HH')"),
+ Grain('day', _('day'), "TRUNC({col}, 'DD')"),
+ Grain('week', _('week'), "TRUNC({col}, 'WW')"),
+ Grain('month', _('month'), "TRUNC({col}, 'MONTH')"),
+ Grain('quarter', _('quarter'), "TRUNC({col}, 'Q')"),
+ Grain('year', _('year'), "TRUNC({col}, 'YYYY')"),
)
@classmethod
def convert_dttm(cls, target_type, dttm):
tt = target_type.upper()
if tt == 'DATE':
            return "'{}'".format(dttm.strftime('%Y-%m-%d'))
return "'{}'".format(dttm.strftime('%Y-%m-%d %H:%M:%S'))
diff --git a/superset/db_engines/hive.py b/superset/db_engines/hive.py
index 635a73bdfc245..ae3c1eaacee63 100644
--- a/superset/db_engines/hive.py
+++ b/superset/db_engines/hive.py
@@ -22,7 +22,7 @@ def fetch_logs(self, max_rows=1024,
except (ttypes.TApplicationException,
Thrift.TApplicationException):
if self._state == self._STATE_NONE:
- raise hive.ProgrammingError("No query yet")
+ raise hive.ProgrammingError('No query yet')
logs = []
while True:
req = ttypes.TFetchResultsReq(
diff --git a/superset/db_engines/presto.py b/superset/db_engines/presto.py
index 57d04601711a7..eb3246451d120 100644
--- a/superset/db_engines/presto.py
+++ b/superset/db_engines/presto.py
@@ -4,17 +4,17 @@
# TODO(bogdan): Remove this when new pyhive release will be available.
def cancel(self):
if self._state == self._STATE_NONE:
- raise presto.ProgrammingError("No query yet")
+ raise presto.ProgrammingError('No query yet')
if self._nextUri is None:
assert self._state == self._STATE_FINISHED, \
- "Should be finished if nextUri is None"
+ 'Should be finished if nextUri is None'
return
response = presto.requests.delete(self._nextUri)
# pylint: disable=no-member
if response.status_code != presto.requests.codes.no_content:
- fmt = "Unexpected status code after cancel {}\n{}"
+ fmt = 'Unexpected status code after cancel {}\n{}'
raise presto.OperationalError(
fmt.format(response.status_code, response.content))
self._state = self._STATE_FINISHED
diff --git a/superset/legacy.py b/superset/legacy.py
index 7585c75c16593..b89b84f0fd2e7 100644
--- a/superset/legacy.py
+++ b/superset/legacy.py
@@ -16,7 +16,7 @@ def cast_filter_data(form_data):
flts = []
having_flts = []
fd = form_data
- filter_pattern = re.compile(r'''((?:[^,"']|"[^"]*"|'[^']*')+)''')
+ filter_pattern = re.compile(r"""((?:[^,"']|"[^"]*"|'[^']*')+)""")
for i in range(0, 10):
for prefix in ['flt', 'having']:
col_str = '{}_col_{}'.format(prefix, i)
diff --git a/superset/models/core.py b/superset/models/core.py
index 130f63e0cbd02..d2989d535e32f 100644
--- a/superset/models/core.py
+++ b/superset/models/core.py
@@ -43,7 +43,7 @@
stats_logger = config.get('STATS_LOGGER')
metadata = Model.metadata # pylint: disable=no-member
-PASSWORD_MASK = "X" * 10
+PASSWORD_MASK = 'X' * 10
def set_related_perm(mapper, connection, target): # noqa
src_class = target.cls_model
@@ -209,19 +209,19 @@ def form_data(self):
def slice_url(self):
"""Defines the url to access the slice"""
return (
- "/superset/explore/{obj.datasource_type}/"
- "{obj.datasource_id}/?form_data={params}".format(
+ '/superset/explore/{obj.datasource_type}/'
+ '{obj.datasource_id}/?form_data={params}'.format(
obj=self, params=parse.quote(json.dumps(self.form_data))))
@property
def slice_id_url(self):
return (
- "/superset/{slc.datasource_type}/{slc.datasource_id}/{slc.id}/"
+ '/superset/{slc.datasource_type}/{slc.datasource_id}/{slc.id}/'
).format(slc=self)
@property
def edit_url(self):
- return "/slicemodelview/edit/{}".format(self.id)
+ return '/slicemodelview/edit/{}'.format(self.id)
@property
def slice_link(self):
@@ -238,9 +238,9 @@ def get_viz(self):
"""
slice_params = json.loads(self.params)
slice_params['slice_id'] = self.id
- slice_params['json'] = "false"
+ slice_params['json'] = 'false'
slice_params['slice_name'] = self.slice_name
- slice_params['viz_type'] = self.viz_type if self.viz_type else "table"
+ slice_params['viz_type'] = self.viz_type if self.viz_type else 'table'
return viz_types[slice_params.get('viz_type')](
self.datasource,
@@ -327,8 +327,8 @@ def __repr__(self):
@property
def table_names(self):
# pylint: disable=no-member
- return ", ".join(
- {"{}".format(s.datasource.full_name) for s in self.slices})
+ return ', '.join(
+ {'{}'.format(s.datasource.full_name) for s in self.slices})
@property
def url(self):
@@ -338,9 +338,9 @@ def url(self):
default_filters = json_metadata.get('default_filters')
if default_filters:
filters = parse.quote(default_filters.encode('utf8'))
- return "/superset/dashboard/{}/?preselect_filters={}".format(
+ return '/superset/dashboard/{}/?preselect_filters={}'.format(
self.slug or self.id, filters)
- return "/superset/dashboard/{}/".format(self.slug or self.id)
+ return '/superset/dashboard/{}/'.format(self.slug or self.id)
@property
def datasources(self):
@@ -538,7 +538,7 @@ class Database(Model, AuditMixinNullable):
"""An ORM object that stores Database related information"""
__tablename__ = 'dbs'
- type = "table"
+ type = 'table'
id = Column(Integer, primary_key=True)
verbose_name = Column(String(250), unique=True)
@@ -633,7 +633,7 @@ def get_sqla_engine(self, schema=None, nullpool=False, user_name=None):
effective_username)
masked_url = self.get_password_masked_url(url)
- logging.info("Database.get_sqla_engine(). Masked URL: {0}".format(masked_url))
+ logging.info('Database.get_sqla_engine(). Masked URL: {0}'.format(masked_url))
params = extra.get('engine_params', {})
if nullpool:
@@ -647,7 +647,7 @@ def get_sqla_engine(self, schema=None, nullpool=False, user_name=None):
self.impersonate_user,
effective_username))
if configuration:
- params["connect_args"] = {"configuration": configuration}
+ params['connect_args'] = {'configuration': configuration}
return create_engine(url, **params)
@@ -676,7 +676,7 @@ def needs_conversion(df_series):
def compile_sqla_query(self, qry, schema=None):
eng = self.get_sqla_engine(schema=schema)
- compiled = qry.compile(eng, compile_kwargs={"literal_binds": True})
+ compiled = qry.compile(eng, compile_kwargs={'literal_binds': True})
return '{}'.format(compiled)
def select_star(
@@ -709,7 +709,7 @@ def all_table_names(self, schema=None, force=False):
if not schema:
tables_dict = self.db_engine_spec.fetch_result_sets(
self, 'table', force=force)
- return tables_dict.get("", [])
+ return tables_dict.get('', [])
return sorted(
self.db_engine_spec.get_table_names(schema, self.inspector))
@@ -717,7 +717,7 @@ def all_view_names(self, schema=None, force=False):
if not schema:
views_dict = self.db_engine_spec.fetch_result_sets(
self, 'view', force=force)
- return views_dict.get("", [])
+ return views_dict.get('', [])
views = []
try:
views = self.inspector.get_view_names(schema)
@@ -796,7 +796,7 @@ def sql_url(self):
def get_perm(self):
return (
- "[{obj.database_name}].(id:{obj.id})").format(obj=self)
+ '[{obj.database_name}].(id:{obj.id})').format(obj=self)
def has_table(self, table):
engine = self.get_sqla_engine()
@@ -851,7 +851,7 @@ def wrapper(*args, **kwargs):
except (ValueError, TypeError):
slice_id = 0
- params = ""
+ params = ''
try:
params = json.dumps(d)
except Exception:
@@ -948,6 +948,6 @@ def user_roles(self):
)
            href = '<a href="{}">Extend {} Role</a>'.format(url, r.name)
if r.name in self.ROLES_BLACKLIST:
- href = "{} Role".format(r.name)
+ href = '{} Role'.format(r.name)
            action_list = action_list + '<li>' + href + '</li>'
        return '<ul>' + action_list + '</ul>'
diff --git a/superset/models/helpers.py b/superset/models/helpers.py
index f179dbec32934..d4ae9f45e83ad 100644
--- a/superset/models/helpers.py
+++ b/superset/models/helpers.py
@@ -39,8 +39,8 @@ def alter_params(self, **kwargs):
@property
def params_dict(self):
if self.params:
- params = re.sub(",[ \t\r\n]+}", "}", self.params)
- params = re.sub(",[ \t\r\n]+\]", "]", params)
+ params = re.sub(',[ \t\r\n]+}', '}', self.params)
+ params = re.sub(',[ \t\r\n]+\]', ']', params)
return json.loads(params)
else:
return {}
diff --git a/superset/models/sql_lab.py b/superset/models/sql_lab.py
index b4fc8cc711149..44b692b915da9 100644
--- a/superset/models/sql_lab.py
+++ b/superset/models/sql_lab.py
@@ -122,7 +122,7 @@ def name(self):
tab = (self.tab_name.replace(' ', '_').lower()
if self.tab_name else 'notab')
tab = re.sub(r'\W+', '', tab)
- return "sqllab_{tab}_{ts}".format(**locals())
+ return 'sqllab_{tab}_{ts}'.format(**locals())
class SavedQuery(Model, AuditMixinNullable):
diff --git a/superset/security.py b/superset/security.py
index ebb62461c17a6..6c7d768970dff 100644
--- a/superset/security.py
+++ b/superset/security.py
@@ -86,15 +86,15 @@ def is_user_defined_permission(perm):
def get_or_create_main_db():
- logging.info("Creating database reference")
+ logging.info('Creating database reference')
dbobj = (
db.session.query(models.Database)
.filter_by(database_name='main')
.first()
)
if not dbobj:
- dbobj = models.Database(database_name="main")
- dbobj.set_sqlalchemy_uri(conf.get("SQLALCHEMY_DATABASE_URI"))
+ dbobj = models.Database(database_name='main')
+ dbobj.set_sqlalchemy_uri(conf.get('SQLALCHEMY_DATABASE_URI'))
dbobj.expose_in_sqllab = True
dbobj.allow_run_sync = True
db.session.add(dbobj)
@@ -146,7 +146,7 @@ def is_granter_pvm(pvm):
def set_role(role_name, pvm_check):
- logging.info("Syncing {} perms".format(role_name))
+ logging.info('Syncing {} perms'.format(role_name))
sesh = sm.get_session()
pvms = sesh.query(ab_models.PermissionView).all()
pvms = [p for p in pvms if p.permission and p.view_menu]
@@ -167,7 +167,7 @@ def create_missing_perms():
"""Creates missing perms for datasources, schemas and metrics"""
logging.info(
- "Fetching a set of all perms to lookup which ones are missing")
+ 'Fetching a set of all perms to lookup which ones are missing')
all_pvs = set()
for pv in sm.get_session.query(sm.permissionview_model).all():
if pv.permission and pv.view_menu:
@@ -178,18 +178,18 @@ def merge_pv(view_menu, perm):
if view_menu and perm and (view_menu, perm) not in all_pvs:
merge_perm(sm, view_menu, perm)
- logging.info("Creating missing datasource permissions.")
+ logging.info('Creating missing datasource permissions.')
datasources = ConnectorRegistry.get_all_datasources(db.session)
for datasource in datasources:
merge_pv('datasource_access', datasource.get_perm())
merge_pv('schema_access', datasource.schema_perm)
- logging.info("Creating missing database permissions.")
+ logging.info('Creating missing database permissions.')
databases = db.session.query(models.Database).all()
for database in databases:
merge_pv('database_access', database.perm)
- logging.info("Creating missing metrics permissions")
+ logging.info('Creating missing metrics permissions')
metrics = []
for datasource_class in ConnectorRegistry.sources.values():
metrics += list(db.session.query(datasource_class.metric_class).all())
@@ -201,7 +201,7 @@ def merge_pv(view_menu, perm):
def sync_role_definitions():
"""Inits the Superset application with security roles and such"""
- logging.info("Syncing role definition")
+ logging.info('Syncing role definition')
get_or_create_main_db()
create_custom_permissions()
diff --git a/superset/sql_lab.py b/superset/sql_lab.py
index ab0f96009c756..399faee4de170 100644
--- a/superset/sql_lab.py
+++ b/superset/sql_lab.py
@@ -63,13 +63,13 @@ def get_query(query_id, session, retry_count=5):
except Exception:
attempt += 1
logging.error(
- "Query with id `{}` could not be retrieved".format(query_id))
+ 'Query with id `{}` could not be retrieved'.format(query_id))
stats_logger.incr('error_attempting_orm_query_' + str(attempt))
- logging.error("Sleeping for a sec before retrying...")
+ logging.error('Sleeping for a sec before retrying...')
sleep(1)
if not query:
stats_logger.incr('error_failed_at_getting_orm_query')
- raise SqlLabException("Failed at getting query")
+ raise SqlLabException('Failed at getting query')
return query
@@ -119,9 +119,9 @@ def execute_sql(
def handle_error(msg):
"""Local method handling error while processing the SQL"""
- troubleshooting_link = config["TROUBLESHOOTING_LINK"]
- msg = "Error: {}. You can find common superset errors and their \
- resolutions at: {}".format(msg, troubleshooting_link) \
+ troubleshooting_link = config['TROUBLESHOOTING_LINK']
+ msg = 'Error: {}. You can find common superset errors and their \
+ resolutions at: {}'.format(msg, troubleshooting_link) \
if troubleshooting_link else msg
query.error_message = msg
query.status = QueryStatus.FAILED
@@ -141,12 +141,12 @@ def handle_error(msg):
executed_sql = superset_query.stripped()
if not superset_query.is_select() and not database.allow_dml:
return handle_error(
- "Only `SELECT` statements are allowed against this database")
+ 'Only `SELECT` statements are allowed against this database')
if query.select_as_cta:
if not superset_query.is_select():
return handle_error(
- "Only `SELECT` statements can be used with the CREATE TABLE "
- "feature.")
+ 'Only `SELECT` statements can be used with the CREATE TABLE '
+ 'feature.')
return
if not query.tmp_table_name:
start_dttm = datetime.fromtimestamp(query.start_time)
@@ -164,7 +164,7 @@ def handle_error(msg):
executed_sql = template_processor.process_template(executed_sql)
except Exception as e:
logging.exception(e)
- msg = "Template rendering failed: " + utils.error_msg_from_exception(e)
+ msg = 'Template rendering failed: ' + utils.error_msg_from_exception(e)
return handle_error(msg)
query.executed_sql = executed_sql
@@ -182,13 +182,13 @@ def handle_error(msg):
)
conn = engine.raw_connection()
cursor = conn.cursor()
- logging.info("Running query: \n{}".format(executed_sql))
+ logging.info('Running query: \n{}'.format(executed_sql))
logging.info(query.executed_sql)
cursor.execute(query.executed_sql,
**db_engine_spec.cursor_execute_kwargs)
- logging.info("Handling cursor")
+ logging.info('Handling cursor')
db_engine_spec.handle_cursor(cursor, query, session)
- logging.info("Fetching data: {}".format(query.to_dict()))
+ logging.info('Fetching data: {}'.format(query.to_dict()))
data = db_engine_spec.fetch_data(cursor, query.limit)
except SoftTimeLimitExceeded as e:
logging.exception(e)
@@ -196,14 +196,14 @@ def handle_error(msg):
conn.close()
return handle_error(
"SQL Lab timeout. This environment's policy is to kill queries "
- "after {} seconds.".format(SQLLAB_TIMEOUT))
+ 'after {} seconds.'.format(SQLLAB_TIMEOUT))
except Exception as e:
logging.exception(e)
if conn is not None:
conn.close()
return handle_error(db_engine_spec.extract_error_message(e))
- logging.info("Fetching cursor description")
+ logging.info('Fetching cursor description')
cursor_description = cursor.description
if conn is not None:
@@ -248,7 +248,7 @@ def handle_error(msg):
})
if store_results:
key = '{}'.format(uuid.uuid4())
- logging.info("Storing results in results backend, key: {}".format(key))
+ logging.info('Storing results in results backend, key: {}'.format(key))
json_payload = json.dumps(payload, default=utils.json_iso_dttm_ser)
results_backend.set(key, utils.zlib_compress(json_payload))
query.results_key = key
diff --git a/superset/sql_parse.py b/superset/sql_parse.py
index 7fc31c0a22a1d..d42e891c64552 100644
--- a/superset/sql_parse.py
+++ b/superset/sql_parse.py
@@ -20,7 +20,7 @@ def __init__(self, sql_statement):
self._table_names = set()
self._alias_names = set()
# TODO: multistatement support
- logging.info("Parsing with sqlparse statement {}".format(self.sql))
+ logging.info('Parsing with sqlparse statement {}'.format(self.sql))
self._parsed = sqlparse.parse(self.sql)
for statement in self._parsed:
self.__extract_from_token(statement)
@@ -50,7 +50,7 @@ def __precedes_table_name(token_value):
@staticmethod
def __get_full_name(identifier):
if len(identifier.tokens) > 1 and identifier.tokens[1].value == '.':
- return "{}.{}".format(identifier.tokens[0].value,
+ return '{}.{}'.format(identifier.tokens[0].value,
identifier.tokens[2].value)
return identifier.get_real_name()
@@ -101,7 +101,7 @@ def as_create_table(self, table_name, overwrite=False):
sql = self.stripped()
if overwrite:
exec_sql = 'DROP TABLE IF EXISTS {table_name};\n'
- exec_sql += "CREATE TABLE {table_name} AS \n{sql}"
+ exec_sql += 'CREATE TABLE {table_name} AS \n{sql}'
return exec_sql.format(**locals())
def __extract_from_token(self, token):
diff --git a/superset/stats_logger.py b/superset/stats_logger.py
index bbeadd7b0ea14..9644f10ea8998 100644
--- a/superset/stats_logger.py
+++ b/superset/stats_logger.py
@@ -30,17 +30,17 @@ def gauge(self, key):
class DummyStatsLogger(BaseStatsLogger):
def incr(self, key):
logging.debug(
- Fore.CYAN + "[stats_logger] (incr) " + key + Style.RESET_ALL)
+ Fore.CYAN + '[stats_logger] (incr) ' + key + Style.RESET_ALL)
def decr(self, key):
logging.debug((
- Fore.CYAN + "[stats_logger] (decr) " + key +
+ Fore.CYAN + '[stats_logger] (decr) ' + key +
Style.RESET_ALL))
def gauge(self, key, value):
logging.debug((
- Fore.CYAN + "[stats_logger] (gauge) "
- "{key} | {value}" + Style.RESET_ALL).format(**locals()))
+ Fore.CYAN + '[stats_logger] (gauge) '
+ '{key} | {value}' + Style.RESET_ALL).format(**locals()))
try:
diff --git a/superset/utils.py b/superset/utils.py
index f3f1a60f6a491..42f2da3aaf031 100644
--- a/superset/utils.py
+++ b/superset/utils.py
@@ -186,9 +186,9 @@ def parse_human_datetime(s):
datetime.datetime(2015, 4, 3, 0, 0)
>>> parse_human_datetime('2/3/1969')
datetime.datetime(1969, 2, 3, 0, 0)
- >>> parse_human_datetime("now") <= datetime.now()
+ >>> parse_human_datetime('now') <= datetime.now()
True
- >>> parse_human_datetime("yesterday") <= datetime.now()
+ >>> parse_human_datetime('yesterday') <= datetime.now()
True
>>> date.today() - timedelta(1) == parse_human_datetime('yesterday').date()
True
@@ -205,7 +205,7 @@ def parse_human_datetime(s):
try:
cal = parsedatetime.Calendar()
parsed_dttm, parsed_flags = cal.parseDT(s)
- # when time is not extracted, we "reset to midnight"
+ # when time is not extracted, we 'reset to midnight'
if parsed_flags & 2 == 0:
parsed_dttm = parsed_dttm.replace(hour=0, minute=0, second=0)
dttm = dttm_from_timtuple(parsed_dttm.utctimetuple())
@@ -224,7 +224,7 @@ def parse_human_timedelta(s):
"""
Returns ``datetime.datetime`` from natural language time deltas
- >>> parse_human_datetime("now") <= datetime.now()
+ >>> parse_human_datetime('now') <= datetime.now()
True
"""
cal = parsedatetime.Calendar()
@@ -260,7 +260,7 @@ def datetime_f(dttm):
dttm = dttm[11:]
elif now_iso[:4] == dttm[:4]:
dttm = dttm[5:]
- return "{}".format(dttm)
+ return '{}'.format(dttm)
def base_json_conv(obj):
@@ -298,7 +298,7 @@ def json_iso_dttm_ser(obj):
obj = obj.isoformat()
else:
raise TypeError(
- "Unserializable object {} of type {}".format(obj, type(obj)))
+ 'Unserializable object {} of type {}'.format(obj, type(obj)))
return obj
@@ -324,7 +324,7 @@ def json_int_dttm_ser(obj):
obj = (obj - EPOCH.date()).total_seconds() * 1000
else:
raise TypeError(
- "Unserializable object {} of type {}".format(obj, type(obj)))
+ 'Unserializable object {} of type {}'.format(obj, type(obj)))
return obj
@@ -343,7 +343,7 @@ def error_msg_from_exception(e):
created via create_engine.
engine = create_engine('presto://localhost:3506/silver') -
gives an e.message as the str(dict)
- presto.connect("localhost", port=3506, catalog='silver') - as a dict.
+ presto.connect('localhost', port=3506, catalog='silver') - as a dict.
The latter version is parsed correctly by this function.
"""
msg = ''
@@ -351,7 +351,7 @@ def error_msg_from_exception(e):
if isinstance(e.message, dict):
msg = e.message.get('message')
elif e.message:
- msg = "{}".format(e.message)
+ msg = '{}'.format(e.message)
return msg or '{}'.format(e)
@@ -384,13 +384,13 @@ def generic_find_constraint_name(table, columns, referenced, db):
def get_datasource_full_name(database_name, datasource_name, schema=None):
if not schema:
- return "[{}].[{}]".format(database_name, datasource_name)
- return "[{}].[{}].[{}]".format(database_name, schema, datasource_name)
+ return '[{}].[{}]'.format(database_name, datasource_name)
+ return '[{}].[{}].[{}]'.format(database_name, schema, datasource_name)
def get_schema_perm(database, schema):
if schema:
- return "[{}].[{}]".format(database, schema)
+ return '[{}].[{}]'.format(database, schema)
def validate_json(obj):
@@ -398,7 +398,7 @@ def validate_json(obj):
try:
json.loads(obj)
except Exception:
- raise SupersetException("JSON is not valid")
+ raise SupersetException('JSON is not valid')
def table_has_constraint(table, name, db):
@@ -421,7 +421,7 @@ def __init__(self, seconds=1, error_message='Timeout'):
self.error_message = error_message
def handle_timeout(self, signum, frame):
- logging.error("Process timed out")
+ logging.error('Process timed out')
raise SupersetTimeoutException(self.error_message)
def __enter__(self):
@@ -441,15 +441,15 @@ def __exit__(self, type, value, traceback):
def pessimistic_connection_handling(some_engine):
- @event.listens_for(some_engine, "engine_connect")
+ @event.listens_for(some_engine, 'engine_connect')
def ping_connection(connection, branch):
if branch:
- # "branch" refers to a sub-connection of a connection,
+ # 'branch' refers to a sub-connection of a connection,
# we don't want to bother pinging on these.
return
- # turn off "close with result". This flag is only used with
- # "connectionless" execution, otherwise will be False in any case
+ # turn off 'close with result'. This flag is only used with
+ # 'connectionless' execution, otherwise will be False in any case
save_should_close_with_result = connection.should_close_with_result
connection.should_close_with_result = False
@@ -461,7 +461,7 @@ def ping_connection(connection, branch):
except exc.DBAPIError as err:
# catch SQLAlchemy's DBAPIError, which is a wrapper
# for the DBAPI's exception. It includes a .connection_invalidated
- # attribute which specifies if this connection is a "disconnect"
+ # attribute which specifies if this connection is a 'disconnect'
# condition, which is based on inspection of the original exception
# by the dialect in use.
if err.connection_invalidated:
@@ -473,7 +473,7 @@ def ping_connection(connection, branch):
else:
raise
finally:
- # restore "close with result"
+ # restore 'close with result'
connection.should_close_with_result = save_should_close_with_result
@@ -514,11 +514,11 @@ def send_email_smtp(to, subject, html_content, config, files=None,
msg = MIMEMultipart(mime_subtype)
msg['Subject'] = subject
msg['From'] = smtp_mail_from
- msg['To'] = ", ".join(to)
+ msg['To'] = ', '.join(to)
recipients = to
if cc:
cc = get_email_address_list(cc)
- msg['CC'] = ", ".join(cc)
+ msg['CC'] = ', '.join(cc)
recipients = recipients + cc
if bcc:
@@ -532,11 +532,11 @@ def send_email_smtp(to, subject, html_content, config, files=None,
for fname in files or []:
basename = os.path.basename(fname)
- with open(fname, "rb") as f:
+ with open(fname, 'rb') as f:
msg.attach(
MIMEApplication(
f.read(),
                    Content_Disposition='attachment; filename="%s"' % basename,
Name=basename))
send_MIME_email(smtp_mail_from, recipients, msg, config, dryrun=dryrun)
@@ -557,7 +557,7 @@ def send_MIME_email(e_from, e_to, mime_msg, config, dryrun=False):
s.starttls()
if SMTP_USER and SMTP_PASSWORD:
s.login(SMTP_USER, SMTP_PASSWORD)
- logging.info("Sent an alert email to " + str(e_to))
+ logging.info('Sent an alert email to ' + str(e_to))
s.sendmail(e_from, e_to, mime_msg.as_string())
s.quit()
else:
@@ -601,11 +601,11 @@ def wraps(self, *args, **kwargs):
logging.warning(
LOGMSG_ERR_SEC_ACCESS_DENIED.format(permission_str,
self.__class__.__name__))
- flash(as_unicode(FLAMSG_ERR_SEC_ACCESS_DENIED), "danger")
+ flash(as_unicode(FLAMSG_ERR_SEC_ACCESS_DENIED), 'danger')
# adds next arg to forward to the original path once user is logged in.
return redirect(
url_for(
- self.appbuilder.sm.auth_view.__class__.__name__ + ".login",
+ self.appbuilder.sm.auth_view.__class__.__name__ + '.login',
next=request.path))
f._permission_name = permission_str
@@ -631,7 +631,7 @@ def zlib_compress(data):
"""
if PY3K:
if isinstance(data, str):
- return zlib.compress(bytes(data, "utf-8"))
+ return zlib.compress(bytes(data, 'utf-8'))
return zlib.compress(data)
return zlib.compress(data)
@@ -649,8 +649,8 @@ def zlib_decompress_to_string(blob):
if isinstance(blob, bytes):
decompressed = zlib.decompress(blob)
else:
- decompressed = zlib.decompress(bytes(blob, "utf-8"))
- return decompressed.decode("utf-8")
+ decompressed = zlib.decompress(bytes(blob, 'utf-8'))
+ return decompressed.decode('utf-8')
return zlib.decompress(blob)
@@ -668,7 +668,7 @@ def get_celery_app(config):
def merge_extra_filters(form_data):
# extra_filters are temporary/contextual filters that are external
# to the slice definition. We use those for dynamic interactive
- # filters like the ones emitted by the "Filter Box" visualization
+ # filters like the ones emitted by the 'Filter Box' visualization
if form_data.get('extra_filters'):
# __form and __to are special extra_filters that target time
# boundaries. The rest of extra_filters are simple
diff --git a/superset/views/annotations.py b/superset/views/annotations.py
index b43e97e2b10a8..e11a412b78b13 100644
--- a/superset/views/annotations.py
+++ b/superset/views/annotations.py
@@ -20,15 +20,15 @@ class AnnotationModelView(SupersetModelView, DeleteMixin): # noqa
def pre_add(self, obj):
if not obj.layer:
- raise Exception("Annotation layer is required.")
+ raise Exception('Annotation layer is required.')
if not obj.start_dttm and not obj.end_dttm:
- raise Exception("Annotation start time or end time is required.")
+ raise Exception('Annotation start time or end time is required.')
elif not obj.start_dttm:
obj.start_dttm = obj.end_dttm
elif not obj.end_dttm:
obj.end_dttm = obj.start_dttm
elif obj.end_dttm < obj.start_dttm:
- raise Exception("Annotation end time must be no earlier than start time.")
+ raise Exception('Annotation end time must be no earlier than start time.')
def pre_update(self, obj):
self.pre_add(obj)
@@ -43,17 +43,17 @@ class AnnotationLayerModelView(SupersetModelView, DeleteMixin):
appbuilder.add_view(
AnnotationLayerModelView,
- "Annotation Layers",
- label=__("Annotation Layers"),
- icon="fa-comment",
- category="Manage",
- category_label=__("Manage"),
+ 'Annotation Layers',
+ label=__('Annotation Layers'),
+ icon='fa-comment',
+ category='Manage',
+ category_label=__('Manage'),
category_icon='')
appbuilder.add_view(
AnnotationModelView,
- "Annotations",
- label=__("Annotations"),
- icon="fa-comments",
- category="Manage",
- category_label=__("Manage"),
+ 'Annotations',
+ label=__('Annotations'),
+ icon='fa-comments',
+ category='Manage',
+ category_label=__('Manage'),
category_icon='')
diff --git a/superset/views/base.py b/superset/views/base.py
index cf0b2b96338b2..7bc55d2c2732f 100644
--- a/superset/views/base.py
+++ b/superset/views/base.py
@@ -21,13 +21,13 @@
def get_error_msg():
- if conf.get("SHOW_STACKTRACE"):
+ if conf.get('SHOW_STACKTRACE'):
error_msg = traceback.format_exc()
else:
- error_msg = "FATAL ERROR \n"
+ error_msg = 'FATAL ERROR \n'
error_msg += (
- "Stacktrace is hidden. Change the SHOW_STACKTRACE "
- "configuration setting to enable it")
+ 'Stacktrace is hidden. Change the SHOW_STACKTRACE '
+ 'configuration setting to enable it')
return error_msg
@@ -38,7 +38,7 @@ def json_error_response(msg=None, status=500, stacktrace=None, payload=None):
payload['stacktrace'] = stacktrace
return Response(
json.dumps(payload, default=utils.json_iso_dttm_ser),
- status=status, mimetype="application/json")
+ status=status, mimetype='application/json')
def api(f):
@@ -57,7 +57,7 @@ def wraps(self, *args, **kwargs):
def get_datasource_exist_error_mgs(full_name):
- return __("Datasource %(name)s already exists", name=full_name)
+ return __('Datasource %(name)s already exists', name=full_name)
def get_user_roles():
@@ -76,26 +76,26 @@ def can_access(self, permission_name, view_name, user=None):
def all_datasource_access(self, user=None):
return self.can_access(
- "all_datasource_access", "all_datasource_access", user=user)
+ 'all_datasource_access', 'all_datasource_access', user=user)
def database_access(self, database, user=None):
return (
self.can_access(
- "all_database_access", "all_database_access", user=user) or
- self.can_access("database_access", database.perm, user=user)
+ 'all_database_access', 'all_database_access', user=user) or
+ self.can_access('database_access', database.perm, user=user)
)
def schema_access(self, datasource, user=None):
return (
self.database_access(datasource.database, user=user) or
self.all_datasource_access(user=user) or
- self.can_access("schema_access", datasource.schema_perm, user=user)
+ self.can_access('schema_access', datasource.schema_perm, user=user)
)
def datasource_access(self, datasource, user=None):
return (
self.schema_access(datasource, user=user) or
- self.can_access("datasource_access", datasource.perm, user=user)
+ self.can_access('datasource_access', datasource.perm, user=user)
)
def datasource_access_by_name(
@@ -110,13 +110,13 @@ def datasource_access_by_name(
datasources = ConnectorRegistry.query_datasources_by_name(
db.session, database, datasource_name, schema=schema)
for datasource in datasources:
- if self.can_access("datasource_access", datasource.perm):
+ if self.can_access('datasource_access', datasource.perm):
return True
return False
def datasource_access_by_fullname(
self, database, full_table_name, schema):
- table_name_pieces = full_table_name.split(".")
+ table_name_pieces = full_table_name.split('.')
if len(table_name_pieces) == 2:
table_schema = table_name_pieces[0]
table_name = table_name_pieces[1]
@@ -234,7 +234,7 @@ def _delete(self, pk):
try:
self.pre_delete(item)
except Exception as e:
- flash(str(e), "danger")
+ flash(str(e), 'danger')
else:
view_menu = sm.find_view_menu(item.get_perm())
pvs = sm.get_session.query(sm.permissionview_model).filter_by(
@@ -266,10 +266,10 @@ def _delete(self, pk):
self.update_redirect()
@action(
- "muldelete",
- __("Delete"),
- __("Delete all Really?"),
- "fa-trash",
+ 'muldelete',
+ __('Delete'),
+ __('Delete all Really?'),
+ 'fa-trash',
single=False,
)
def muldelete(self, items):
@@ -279,7 +279,7 @@ def muldelete(self, items):
try:
self.pre_delete(item)
except Exception as e:
- flash(str(e), "danger")
+ flash(str(e), 'danger')
else:
self._delete(item.id)
self.update_redirect()
diff --git a/superset/views/core.py b/superset/views/core.py
index c106556edc99b..e5ae70846e763 100755
--- a/superset/views/core.py
+++ b/superset/views/core.py
@@ -52,26 +52,26 @@
ALL_DATASOURCE_ACCESS_ERR = __(
- "This endpoint requires the `all_datasource_access` permission")
-DATASOURCE_MISSING_ERR = __("The datasource seems to have been deleted")
+ 'This endpoint requires the `all_datasource_access` permission')
+DATASOURCE_MISSING_ERR = __('The datasource seems to have been deleted')
ACCESS_REQUEST_MISSING_ERR = __(
- "The access requests seem to have been deleted")
-USER_MISSING_ERR = __("The user seems to have been deleted")
+ 'The access requests seem to have been deleted')
+USER_MISSING_ERR = __('The user seems to have been deleted')
DATASOURCE_ACCESS_ERR = __("You don't have access to this datasource")
def get_database_access_error_msg(database_name):
- return __("This view requires the database %(name)s or "
- "`all_datasource_access` permission", name=database_name)
+ return __('This view requires the database %(name)s or '
+ '`all_datasource_access` permission', name=database_name)
def get_datasource_access_error_msg(datasource_name):
- return __("This endpoint requires the datasource %(name)s, database or "
- "`all_datasource_access` permission", name=datasource_name)
+ return __('This endpoint requires the datasource %(name)s, database or '
+ '`all_datasource_access` permission', name=datasource_name)
def json_success(json_msg, status=200):
- return Response(json_msg, status=status, mimetype="application/json")
+ return Response(json_msg, status=status, mimetype='application/json')
def is_owner(obj, user):
@@ -158,10 +158,10 @@ def apply(self, query, func): # noqa
def generate_download_headers(extension):
- filename = datetime.now().strftime("%Y%m%d_%H%M%S")
- content_disp = "attachment; filename={}.{}".format(filename, extension)
+ filename = datetime.now().strftime('%Y%m%d_%H%M%S')
+ content_disp = 'attachment; filename={}.{}'.format(filename, extension)
headers = {
- "Content-Disposition": content_disp,
+ 'Content-Disposition': content_disp,
}
return headers
@@ -201,63 +201,63 @@ class DatabaseView(SupersetModelView, DeleteMixin): # noqa
'changed_by',
'changed_on',
]
- add_template = "superset/models/database/add.html"
- edit_template = "superset/models/database/edit.html"
+ add_template = 'superset/models/database/add.html'
+ edit_template = 'superset/models/database/edit.html'
base_order = ('changed_on', 'desc')
description_columns = {
'sqlalchemy_uri': utils.markdown(
- "Refer to the "
- "[SqlAlchemy docs]"
- "(http://docs.sqlalchemy.org/en/rel_1_0/core/engines.html#"
- "database-urls) "
- "for more information on how to structure your URI.", True),
- 'expose_in_sqllab': _("Expose this DB in SQL Lab"),
+ 'Refer to the '
+ '[SqlAlchemy docs]'
+ '(http://docs.sqlalchemy.org/en/rel_1_0/core/engines.html#'
+ 'database-urls) '
+ 'for more information on how to structure your URI.', True),
+ 'expose_in_sqllab': _('Expose this DB in SQL Lab'),
'allow_run_sync': _(
- "Allow users to run synchronous queries, this is the default "
- "and should work well for queries that can be executed "
- "within a web request scope (<~1 minute)"),
+ 'Allow users to run synchronous queries, this is the default '
+ 'and should work well for queries that can be executed '
+ 'within a web request scope (<~1 minute)'),
'allow_run_async': _(
- "Allow users to run queries, against an async backend. "
- "This assumes that you have a Celery worker setup as well "
- "as a results backend."),
- 'allow_ctas': _("Allow CREATE TABLE AS option in SQL Lab"),
+ 'Allow users to run queries, against an async backend. '
+ 'This assumes that you have a Celery worker setup as well '
+ 'as a results backend.'),
+ 'allow_ctas': _('Allow CREATE TABLE AS option in SQL Lab'),
'allow_dml': _(
- "Allow users to run non-SELECT statements "
- "(UPDATE, DELETE, CREATE, ...) "
- "in SQL Lab"),
+ 'Allow users to run non-SELECT statements '
+ '(UPDATE, DELETE, CREATE, ...) '
+ 'in SQL Lab'),
'force_ctas_schema': _(
- "When allowing CREATE TABLE AS option in SQL Lab, "
- "this option forces the table to be created in this schema"),
+ 'When allowing CREATE TABLE AS option in SQL Lab, '
+ 'this option forces the table to be created in this schema'),
'extra': utils.markdown(
- "JSON string containing extra configuration elements. "
- "The ``engine_params`` object gets unpacked into the "
- "[sqlalchemy.create_engine]"
- "(http://docs.sqlalchemy.org/en/latest/core/engines.html#"
- "sqlalchemy.create_engine) call, while the ``metadata_params`` "
- "gets unpacked into the [sqlalchemy.MetaData]"
- "(http://docs.sqlalchemy.org/en/rel_1_0/core/metadata.html"
- "#sqlalchemy.schema.MetaData) call. ", True),
+ 'JSON string containing extra configuration elements. '
+ 'The ``engine_params`` object gets unpacked into the '
+ '[sqlalchemy.create_engine]'
+ '(http://docs.sqlalchemy.org/en/latest/core/engines.html#'
+ 'sqlalchemy.create_engine) call, while the ``metadata_params`` '
+ 'gets unpacked into the [sqlalchemy.MetaData]'
+ '(http://docs.sqlalchemy.org/en/rel_1_0/core/metadata.html'
+ '#sqlalchemy.schema.MetaData) call. ', True),
'impersonate_user': _(
- "If Presto, all the queries in SQL Lab are going to be executed as the "
-        "currently logged on user who must have permission to run them.<br/>"
- "If Hive and hive.server2.enable.doAs is enabled, will run the queries as "
- "service account, but impersonate the currently logged on user "
- "via hive.server2.proxy.user property."),
+ 'If Presto, all the queries in SQL Lab are going to be executed as the '
+        'currently logged on user who must have permission to run them.<br/>'
+ 'If Hive and hive.server2.enable.doAs is enabled, will run the queries as '
+ 'service account, but impersonate the currently logged on user '
+ 'via hive.server2.proxy.user property.'),
}
label_columns = {
- 'expose_in_sqllab': _("Expose in SQL Lab"),
- 'allow_ctas': _("Allow CREATE TABLE AS"),
- 'allow_dml': _("Allow DML"),
- 'force_ctas_schema': _("CTAS Schema"),
- 'database_name': _("Database"),
- 'creator': _("Creator"),
- 'changed_on_': _("Last Changed"),
- 'sqlalchemy_uri': _("SQLAlchemy URI"),
- 'cache_timeout': _("Cache Timeout"),
- 'extra': _("Extra"),
- 'allow_run_sync': _("Allow Run Sync"),
- 'allow_run_async': _("Allow Run Async"),
- 'impersonate_user': _("Impersonate the logged on user"),
+ 'expose_in_sqllab': _('Expose in SQL Lab'),
+ 'allow_ctas': _('Allow CREATE TABLE AS'),
+ 'allow_dml': _('Allow DML'),
+ 'force_ctas_schema': _('CTAS Schema'),
+ 'database_name': _('Database'),
+ 'creator': _('Creator'),
+ 'changed_on_': _('Last Changed'),
+ 'sqlalchemy_uri': _('SQLAlchemy URI'),
+ 'cache_timeout': _('Cache Timeout'),
+ 'extra': _('Extra'),
+ 'allow_run_sync': _('Allow Run Sync'),
+ 'allow_run_async': _('Allow Run Async'),
+ 'impersonate_user': _('Impersonate the logged on user'),
}
def pre_add(self, db):
@@ -276,21 +276,21 @@ def _delete(self, pk):
appbuilder.add_link(
'Import Dashboards',
- label=__("Import Dashboards"),
+ label=__('Import Dashboards'),
href='/superset/import_dashboards',
- icon="fa-cloud-upload",
+ icon='fa-cloud-upload',
category='Manage',
- category_label=__("Manage"),
+ category_label=__('Manage'),
category_icon='fa-wrench',)
appbuilder.add_view(
DatabaseView,
- "Databases",
- label=__("Databases"),
- icon="fa-database",
- category="Sources",
- category_label=__("Sources"),
+ 'Databases',
+ label=__('Databases'),
+ icon='fa-database',
+ category='Sources',
+ category_label=__('Sources'),
category_icon='fa-database',)
@@ -320,21 +320,21 @@ class AccessRequestsModelView(SupersetModelView, DeleteMixin):
order_columns = ['created_on']
base_order = ('changed_on', 'desc')
label_columns = {
- 'username': _("User"),
- 'user_roles': _("User Roles"),
- 'database': _("Database URL"),
- 'datasource_link': _("Datasource"),
- 'roles_with_datasource': _("Roles to grant"),
- 'created_on': _("Created On"),
+ 'username': _('User'),
+ 'user_roles': _('User Roles'),
+ 'database': _('Database URL'),
+ 'datasource_link': _('Datasource'),
+ 'roles_with_datasource': _('Roles to grant'),
+ 'created_on': _('Created On'),
}
appbuilder.add_view(
AccessRequestsModelView,
- "Access requests",
- label=__("Access requests"),
- category="Security",
- category_label=__("Security"),
+ 'Access requests',
+ label=__('Access requests'),
+ category='Security',
+ category_label=__('Security'),
icon='fa-table',)
@@ -362,33 +362,33 @@ class SliceModelView(SupersetModelView, DeleteMixin): # noqa
base_order = ('changed_on', 'desc')
description_columns = {
'description': Markup(
- "The content here can be displayed as widget headers in the "
- "dashboard view. Supports "
- ""
- "markdown"),
+ 'The content here can be displayed as widget headers in the '
+ 'dashboard view. Supports '
+ ''
+ 'markdown'),
'params': _(
- "These parameters are generated dynamically when clicking "
- "the save or overwrite button in the explore view. This JSON "
- "object is exposed here for reference and for power users who may "
- "want to alter specific parameters.",
+ 'These parameters are generated dynamically when clicking '
+ 'the save or overwrite button in the explore view. This JSON '
+ 'object is exposed here for reference and for power users who may '
+ 'want to alter specific parameters.',
),
'cache_timeout': _(
- "Duration (in seconds) of the caching timeout for this slice."),
+ 'Duration (in seconds) of the caching timeout for this slice.'),
}
base_filters = [['id', SliceFilter, lambda: []]]
label_columns = {
- 'cache_timeout': _("Cache Timeout"),
- 'creator': _("Creator"),
- 'dashboards': _("Dashboards"),
- 'datasource_link': _("Datasource"),
- 'description': _("Description"),
- 'modified': _("Last Modified"),
- 'owners': _("Owners"),
- 'params': _("Parameters"),
- 'slice_link': _("Slice"),
- 'slice_name': _("Name"),
- 'table': _("Table"),
- 'viz_type': _("Visualization Type"),
+ 'cache_timeout': _('Cache Timeout'),
+ 'creator': _('Creator'),
+ 'dashboards': _('Dashboards'),
+ 'datasource_link': _('Datasource'),
+ 'description': _('Description'),
+ 'modified': _('Last Modified'),
+ 'owners': _('Owners'),
+ 'params': _('Parameters'),
+ 'slice_link': _('Slice'),
+ 'slice_name': _('Name'),
+ 'table': _('Table'),
+ 'viz_type': _('Visualization Type'),
}
def pre_add(self, obj):
@@ -410,19 +410,19 @@ def add(self):
for d in datasources
]
return self.render_template(
- "superset/add_slice.html",
+ 'superset/add_slice.html',
bootstrap_data=json.dumps({
- 'datasources': sorted(datasources, key=lambda d: d["label"]),
+ 'datasources': sorted(datasources, key=lambda d: d['label']),
}),
)
appbuilder.add_view(
SliceModelView,
- "Slices",
- label=__("Slices"),
- icon="fa-bar-chart",
- category="",
+ 'Slices',
+ label=__('Slices'),
+ icon='fa-bar-chart',
+ category='',
category_icon='',)
@@ -467,21 +467,21 @@ class DashboardModelView(SupersetModelView, DeleteMixin): # noqa
base_order = ('changed_on', 'desc')
description_columns = {
'position_json': _(
- "This json object describes the positioning of the widgets in "
- "the dashboard. It is dynamically generated when adjusting "
- "the widgets size and positions by using drag & drop in "
- "the dashboard view"),
+ 'This json object describes the positioning of the widgets in '
+ 'the dashboard. It is dynamically generated when adjusting '
+ 'the widgets size and positions by using drag & drop in '
+ 'the dashboard view'),
'css': _(
- "The css for individual dashboards can be altered here, or "
- "in the dashboard view where changes are immediately "
- "visible"),
- 'slug': _("To get a readable URL for your dashboard"),
+ 'The css for individual dashboards can be altered here, or '
+ 'in the dashboard view where changes are immediately '
+ 'visible'),
+ 'slug': _('To get a readable URL for your dashboard'),
'json_metadata': _(
- "This JSON object is generated dynamically when clicking "
- "the save or overwrite button in the dashboard view. It "
- "is exposed here for reference and for power users who may "
- "want to alter specific parameters."),
- 'owners': _("Owners is a list of users who can alter the dashboard."),
+ 'This JSON object is generated dynamically when clicking '
+ 'the save or overwrite button in the dashboard view. It '
+ 'is exposed here for reference and for power users who may '
+ 'want to alter specific parameters.'),
+ 'owners': _('Owners is a list of users who can alter the dashboard.'),
}
base_filters = [['slice', DashboardFilter, lambda: []]]
add_form_query_rel_fields = {
@@ -489,23 +489,23 @@ class DashboardModelView(SupersetModelView, DeleteMixin): # noqa
}
edit_form_query_rel_fields = add_form_query_rel_fields
label_columns = {
- 'dashboard_link': _("Dashboard"),
- 'dashboard_title': _("Title"),
- 'slug': _("Slug"),
- 'slices': _("Slices"),
- 'owners': _("Owners"),
- 'creator': _("Creator"),
- 'modified': _("Modified"),
- 'position_json': _("Position JSON"),
- 'css': _("CSS"),
- 'json_metadata': _("JSON Metadata"),
- 'table_names': _("Underlying Tables"),
+ 'dashboard_link': _('Dashboard'),
+ 'dashboard_title': _('Title'),
+ 'slug': _('Slug'),
+ 'slices': _('Slices'),
+ 'owners': _('Owners'),
+ 'creator': _('Creator'),
+ 'modified': _('Modified'),
+ 'position_json': _('Position JSON'),
+ 'css': _('CSS'),
+ 'json_metadata': _('JSON Metadata'),
+ 'table_names': _('Underlying Tables'),
}
def pre_add(self, obj):
obj.slug = obj.slug.strip() or None
if obj.slug:
- obj.slug = obj.slug.replace(" ", "-")
+ obj.slug = obj.slug.replace(' ', '-')
obj.slug = re.sub(r'\W+', '', obj.slug)
if g.user not in obj.owners:
obj.owners.append(g.user)
@@ -522,7 +522,7 @@ def pre_update(self, obj):
def pre_delete(self, obj):
check_ownership(obj)
- @action("mulexport", __("Export"), __("Export dashboards?"), "fa-database")
+ @action('mulexport', __('Export'), __('Export dashboards?'), 'fa-database')
def mulexport(self, items):
if not isinstance(items, list):
items = [items]
@@ -530,14 +530,14 @@ def mulexport(self, items):
return redirect(
'/dashboardmodelview/export_dashboards_form?{}'.format(ids[1:]))
- @expose("/export_dashboards_form")
+ @expose('/export_dashboards_form')
def download_dashboards(self):
if request.args.get('action') == 'go':
ids = request.args.getlist('id')
return Response(
models.Dashboard.export_dashboards(ids),
- headers=generate_download_headers("pickle"),
- mimetype="application/text")
+ headers=generate_download_headers('pickle'),
+ mimetype='application/text')
return self.render_template(
'superset/export_dashboards.html',
dashboards_url='/dashboardmodelview/list',
@@ -546,9 +546,9 @@ def download_dashboards(self):
appbuilder.add_view(
DashboardModelView,
- "Dashboards",
- label=__("Dashboards"),
- icon="fa-dashboard",
+ 'Dashboards',
+ label=__('Dashboards'),
+ icon='fa-dashboard',
category='',
category_icon='',)
@@ -572,35 +572,35 @@ class LogModelView(SupersetModelView):
edit_columns = ('user', 'action', 'dttm', 'json')
base_order = ('dttm', 'desc')
label_columns = {
- 'user': _("User"),
- 'action': _("Action"),
- 'dttm': _("dttm"),
- 'json': _("JSON"),
+ 'user': _('User'),
+ 'action': _('Action'),
+ 'dttm': _('dttm'),
+ 'json': _('JSON'),
}
appbuilder.add_view(
LogModelView,
- "Action Log",
- label=__("Action Log"),
- category="Security",
- category_label=__("Security"),
- icon="fa-list-ol")
+ 'Action Log',
+ label=__('Action Log'),
+ category='Security',
+ category_label=__('Security'),
+ icon='fa-list-ol')
@app.route('/health')
def health():
- return "OK"
+ return 'OK'
@app.route('/healthcheck')
def healthcheck():
- return "OK"
+ return 'OK'
@app.route('/ping')
def ping():
- return "OK"
+ return 'OK'
class KV(BaseSupersetView):
@@ -608,7 +608,7 @@ class KV(BaseSupersetView):
"""Used for storing and retrieving key value pairs"""
@log_this
- @expose("/store/", methods=['POST'])
+ @expose('/store/', methods=['POST'])
def store(self):
try:
value = request.form.get('data')
@@ -622,7 +622,7 @@ def store(self):
status=200)
@log_this
- @expose("//", methods=['GET'])
+ @expose('//', methods=['GET'])
def get_value(self, key_id):
kv = None
try:
@@ -640,30 +640,30 @@ class R(BaseSupersetView):
"""used for short urls"""
@log_this
- @expose("/")
+ @expose('/')
def index(self, url_id):
url = db.session.query(models.Url).filter_by(id=url_id).first()
if url:
return redirect('/' + url.url)
else:
- flash("URL to nowhere...", "danger")
+ flash('URL to nowhere...', 'danger')
return redirect('/')
@log_this
- @expose("/shortner/", methods=['POST', 'GET'])
+ @expose('/shortner/', methods=['POST', 'GET'])
def shortner(self):
url = request.form.get('data')
obj = models.Url(url=url)
db.session.add(obj)
db.session.commit()
- return("http://{request.headers[Host]}/r/{obj.id}".format(
+ return('http://{request.headers[Host]}/r/{obj.id}'.format(
request=request, obj=obj))
- @expose("/msg/")
+ @expose('/msg/')
def msg(self):
"""Redirects to specified url while flash a message"""
- flash(Markup(request.args.get("msg")), "info")
- return redirect(request.args.get("url"))
+ flash(Markup(request.args.get('msg')), 'info')
+ return redirect(request.args.get('url'))
appbuilder.add_view_no_menu(R)
@@ -673,7 +673,7 @@ class Superset(BaseSupersetView):
"""The base views for Superset!"""
@api
@has_access_api
- @expose("/update_role/", methods=['POST'])
+ @expose('/update_role/', methods=['POST'])
def update_role(self):
"""Assigns a list of found users to the given role."""
data = request.get_json(force=True)
@@ -699,7 +699,7 @@ def update_role(self):
user_data = user_data_dict[username]
user = sm.find_user(email=user_data['email'])
if not user:
- logging.info("Adding user: {}.".format(user_data))
+ logging.info('Adding user: {}.'.format(user_data))
sm.add_user(
username=user_data['username'],
first_name=user_data['first_name'],
@@ -727,10 +727,10 @@ def json_response(self, obj, status=200):
return Response(
json.dumps(obj, default=utils.json_int_dttm_ser),
status=status,
- mimetype="application/json")
+ mimetype='application/json')
@has_access_api
- @expose("/datasources/")
+ @expose('/datasources/')
def datasources(self):
datasources = ConnectorRegistry.get_all_datasources(db.session)
datasources = [o.short_data for o in datasources]
@@ -738,7 +738,7 @@ def datasources(self):
return self.json_response(datasources)
@has_access_api
- @expose("/override_role_permissions/", methods=['POST'])
+ @expose('/override_role_permissions/', methods=['POST'])
def override_role_permissions(self):
"""Updates the role with the give datasource permissions.
@@ -792,7 +792,7 @@ def override_role_permissions(self):
@log_this
@has_access
- @expose("/request_access/")
+ @expose('/request_access/')
def request_access(self):
datasources = set()
dashboard_id = request.args.get('dashboard_id')
@@ -820,18 +820,18 @@ def request_access(self):
datasource_type=datasource.type)
db.session.add(access_request)
db.session.commit()
- flash(__("Access was requested"), "info")
+ flash(__('Access was requested'), 'info')
return redirect('/')
return self.render_template(
'superset/request_access.html',
datasources=datasources,
- datasource_names=", ".join([o.name for o in datasources]),
+ datasource_names=', '.join([o.name for o in datasources]),
)
@log_this
@has_access
- @expose("/approve")
+ @expose('/approve')
def approve(self):
def clean_fulfilled_requests(session):
for r in session.query(DAR).all():
@@ -854,12 +854,12 @@ def clean_fulfilled_requests(session):
datasource_type, datasource_id, session)
if not datasource:
- flash(DATASOURCE_MISSING_ERR, "alert")
+ flash(DATASOURCE_MISSING_ERR, 'alert')
return json_error_response(DATASOURCE_MISSING_ERR)
requested_by = sm.find_user(username=created_by_username)
if not requested_by:
- flash(USER_MISSING_ERR, "alert")
+ flash(USER_MISSING_ERR, 'alert')
return json_error_response(USER_MISSING_ERR)
requests = (
@@ -872,7 +872,7 @@ def clean_fulfilled_requests(session):
)
if not requests:
- flash(ACCESS_REQUEST_MISSING_ERR, "alert")
+ flash(ACCESS_REQUEST_MISSING_ERR, 'alert')
return json_error_response(ACCESS_REQUEST_MISSING_ERR)
# check if you can approve
@@ -882,32 +882,32 @@ def clean_fulfilled_requests(session):
role = sm.find_role(role_to_grant)
requested_by.roles.append(role)
msg = __(
- "%(user)s was granted the role %(role)s that gives access "
- "to the %(datasource)s",
+ '%(user)s was granted the role %(role)s that gives access '
+ 'to the %(datasource)s',
user=requested_by.username,
role=role_to_grant,
datasource=datasource.full_name)
utils.notify_user_about_perm_udate(
g.user, requested_by, role, datasource,
'email/role_granted.txt', app.config)
- flash(msg, "info")
+ flash(msg, 'info')
if role_to_extend:
perm_view = sm.find_permission_view_menu(
'email/datasource_access', datasource.perm)
role = sm.find_role(role_to_extend)
sm.add_permission_role(role, perm_view)
- msg = __("Role %(r)s was extended to provide the access to "
- "the datasource %(ds)s", r=role_to_extend,
+ msg = __('Role %(r)s was extended to provide the access to '
+ 'the datasource %(ds)s', r=role_to_extend,
ds=datasource.full_name)
utils.notify_user_about_perm_udate(
g.user, requested_by, role, datasource,
'email/role_extended.txt', app.config)
- flash(msg, "info")
+ flash(msg, 'info')
clean_fulfilled_requests(session)
else:
- flash(__("You have no permission to approve this request"),
- "danger")
+ flash(__('You have no permission to approve this request'),
+ 'danger')
return redirect('/accessrequestsmodelview/list/')
for r in requests:
session.delete(r)
@@ -916,17 +916,17 @@ def clean_fulfilled_requests(session):
def get_form_data(self):
# get form data from url
- if request.args.get("form_data"):
- form_data = request.args.get("form_data")
- elif request.form.get("form_data"):
+ if request.args.get('form_data'):
+ form_data = request.args.get('form_data')
+ elif request.form.get('form_data'):
# Supporting POST as well as get
- form_data = request.form.get("form_data")
+ form_data = request.form.get('form_data')
else:
form_data = '{}'
d = json.loads(form_data)
- if request.args.get("viz_type"):
+ if request.args.get('viz_type'):
# Converting old URLs
d = cast_form_data(request.args)
return d
@@ -956,7 +956,7 @@ def get_viz(
return viz_obj
@has_access
- @expose("/slice//")
+ @expose('/slice//')
def slice(self, slice_id):
viz_obj = self.get_viz(slice_id)
endpoint = (
@@ -967,7 +967,7 @@ def slice(self, slice_id):
parse.quote(json.dumps(viz_obj.form_data)),
)
)
- if request.args.get("standalone") == "true":
+ if request.args.get('standalone') == 'true':
endpoint += '&standalone=true'
return redirect(endpoint)
@@ -983,11 +983,11 @@ def get_query_string_response(self, viz_obj):
'language': viz_obj.datasource.query_language,
}),
status=200,
- mimetype="application/json")
+ mimetype='application/json')
@log_this
@has_access_api
- @expose("/explore_json///")
+ @expose('/explore_json///')
def explore_json(self, datasource_type, datasource_id):
try:
viz_obj = self.get_viz(
@@ -1003,14 +1003,14 @@ def explore_json(self, datasource_type, datasource_id):
if not self.datasource_access(viz_obj.datasource):
return json_error_response(DATASOURCE_ACCESS_ERR, status=404)
- if request.args.get("csv") == "true":
+ if request.args.get('csv') == 'true':
return CsvResponse(
viz_obj.get_csv(),
status=200,
- headers=generate_download_headers("csv"),
- mimetype="application/csv")
+ headers=generate_download_headers('csv'),
+ mimetype='application/csv')
- if request.args.get("query") == "true":
+ if request.args.get('query') == 'true':
return self.get_query_string_response(viz_obj)
payload = {}
@@ -1027,7 +1027,7 @@ def explore_json(self, datasource_type, datasource_id):
return json_success(viz_obj.json_dumps(payload), status=status)
- @expose("/import_dashboards", methods=['GET', 'POST'])
+ @expose('/import_dashboards', methods=['GET', 'POST'])
@log_this
def import_dashboards(self):
"""Overrides the dashboards using pickled instances from the file."""
@@ -1049,7 +1049,7 @@ def import_dashboards(self):
@log_this
@has_access
- @expose("/explorev2///")
+ @expose('/explorev2///')
def explorev2(self, datasource_type, datasource_id):
return redirect(url_for(
'Superset.explore',
@@ -1059,12 +1059,12 @@ def explorev2(self, datasource_type, datasource_id):
@log_this
@has_access
- @expose("/explore///")
+ @expose('/explore///')
def explore(self, datasource_type, datasource_id):
form_data = self.get_form_data()
datasource_id = int(datasource_id)
- viz_type = form_data.get("viz_type")
+ viz_type = form_data.get('viz_type')
slice_id = form_data.get('slice_id')
user_id = g.user.get_id() if g.user else None
@@ -1076,13 +1076,13 @@ def explore(self, datasource_type, datasource_id):
datasource = ConnectorRegistry.get_datasource(
datasource_type, datasource_id, db.session)
if not datasource:
- flash(DATASOURCE_MISSING_ERR, "danger")
+ flash(DATASOURCE_MISSING_ERR, 'danger')
return redirect(error_redirect)
if not self.datasource_access(datasource):
flash(
__(get_datasource_access_error_msg(datasource.name)),
- "danger")
+ 'danger')
return redirect(
'superset/request_access/?'
'datasource_type={datasource_type}&'
@@ -1121,30 +1121,30 @@ def explore(self, datasource_type, datasource_id):
# On explore, merge extra filters into the form data
merge_extra_filters(form_data)
- standalone = request.args.get("standalone") == "true"
+ standalone = request.args.get('standalone') == 'true'
bootstrap_data = {
- "can_add": slice_add_perm,
- "can_download": slice_download_perm,
- "can_overwrite": slice_overwrite_perm,
- "datasource": datasource.data,
- "form_data": form_data,
- "datasource_id": datasource_id,
- "datasource_type": datasource_type,
- "slice": slc.data if slc else None,
- "standalone": standalone,
- "user_id": user_id,
- "forced_height": request.args.get('height'),
+ 'can_add': slice_add_perm,
+ 'can_download': slice_download_perm,
+ 'can_overwrite': slice_overwrite_perm,
+ 'datasource': datasource.data,
+ 'form_data': form_data,
+ 'datasource_id': datasource_id,
+ 'datasource_type': datasource_type,
+ 'slice': slc.data if slc else None,
+ 'standalone': standalone,
+ 'user_id': user_id,
+ 'forced_height': request.args.get('height'),
'common': self.common_bootsrap_payload(),
}
table_name = datasource.table_name \
if datasource_type == 'table' \
else datasource.datasource_name
if slc:
- title = "[slice] " + slc.slice_name
+ title = '[slice] ' + slc.slice_name
else:
- title = "[explore] " + table_name
+ title = '[explore] ' + table_name
return self.render_template(
- "superset/basic.html",
+ 'superset/basic.html',
bootstrap_data=json.dumps(bootstrap_data),
entry='explore',
title=title,
@@ -1152,7 +1152,7 @@ def explore(self, datasource_type, datasource_id):
@api
@has_access_api
- @expose("/filter////")
+ @expose('/filter////')
def filter(self, datasource_type, datasource_id, column):
"""
Endpoint to retrieve values for specified column.
@@ -1209,29 +1209,29 @@ def save_or_overwrite_slice(
.one()
)
flash(
- "Slice [{}] was added to dashboard [{}]".format(
+ 'Slice [{}] was added to dashboard [{}]'.format(
slc.slice_name,
dash.dashboard_title),
- "info")
+ 'info')
elif request.args.get('add_to_dash') == 'new':
dash = models.Dashboard(
dashboard_title=request.args.get('new_dashboard_name'),
owners=[g.user] if g.user else [])
flash(
- "Dashboard [{}] just got created and slice [{}] was added "
- "to it".format(
+ 'Dashboard [{}] just got created and slice [{}] was added '
+ 'to it'.format(
dash.dashboard_title,
slc.slice_name),
- "info")
+ 'info')
if dash and slc not in dash.slices:
dash.slices.append(slc)
db.session.commit()
response = {
- "can_add": slice_add_perm,
- "can_download": slice_download_perm,
- "can_overwrite": is_owner(slc, g.user),
+ 'can_add': slice_add_perm,
+ 'can_download': slice_download_perm,
+ 'can_overwrite': is_owner(slc, g.user),
'form_data': slc.form_data,
'slice': slc.data,
}
@@ -1243,21 +1243,21 @@ def save_or_overwrite_slice(
def save_slice(self, slc):
session = db.session()
- msg = "Slice [{}] has been saved".format(slc.slice_name)
+ msg = 'Slice [{}] has been saved'.format(slc.slice_name)
session.add(slc)
session.commit()
- flash(msg, "info")
+ flash(msg, 'info')
def overwrite_slice(self, slc):
session = db.session()
session.merge(slc)
session.commit()
- msg = "Slice [{}] has been overwritten".format(slc.slice_name)
- flash(msg, "info")
+ msg = 'Slice [{}] has been overwritten'.format(slc.slice_name)
+ flash(msg, 'info')
@api
@has_access_api
- @expose("/checkbox////", methods=['GET'])
+ @expose('/checkbox////', methods=['GET'])
def checkbox(self, model_view, id_, attr, value):
"""endpoint for checking/unchecking any boolean in a sqla model"""
modelview_to_model = {
@@ -1269,11 +1269,11 @@ def checkbox(self, model_view, id_, attr, value):
if obj:
setattr(obj, attr, value == 'true')
db.session.commit()
- return json_success("OK")
+ return json_success('OK')
@api
@has_access_api
- @expose("/activity_per_day")
+ @expose('/activity_per_day')
def activity_per_day(self):
"""endpoint to power the calendar heatmap on the welcome page"""
Log = models.Log # noqa
@@ -1291,7 +1291,7 @@ def activity_per_day(self):
@api
@has_access_api
- @expose("/schemas//")
+ @expose('/schemas//')
def schemas(self, db_id):
db_id = int(db_id)
database = (
@@ -1304,11 +1304,11 @@ def schemas(self, db_id):
schemas = self.schemas_accessible_by_user(database, schemas)
return Response(
json.dumps({'schemas': schemas}),
- mimetype="application/json")
+ mimetype='application/json')
@api
@has_access_api
- @expose("/tables////")
+ @expose('/tables////')
def tables(self, db_id, schema, substr):
"""Endpoint to fetch the list of tables for given database"""
db_id = int(db_id)
@@ -1344,7 +1344,7 @@ def tables(self, db_id, schema, substr):
@api
@has_access_api
- @expose("/copy_dash//", methods=['GET', 'POST'])
+ @expose('/copy_dash//', methods=['GET', 'POST'])
def copy_dash(self, dashboard_id):
"""Copy dashboard"""
session = db.session()
@@ -1383,7 +1383,7 @@ def copy_dash(self, dashboard_id):
@api
@has_access_api
- @expose("/save_dash//", methods=['GET', 'POST'])
+ @expose('/save_dash//', methods=['GET', 'POST'])
def save_dash(self, dashboard_id):
"""Save a dashboard's metadata"""
session = db.session()
@@ -1396,7 +1396,7 @@ def save_dash(self, dashboard_id):
session.merge(dash)
session.commit()
session.close()
- return "SUCCESS"
+ return 'SUCCESS'
@staticmethod
def _set_dash_metadata(dashboard, data):
@@ -1421,7 +1421,7 @@ def _set_dash_metadata(dashboard, data):
@api
@has_access_api
- @expose("/add_slices//", methods=['POST'])
+ @expose('/add_slices//', methods=['POST'])
def add_slices(self, dashboard_id):
"""Add and save slices to a dashboard"""
data = json.loads(request.form.get('data'))
@@ -1436,11 +1436,11 @@ def add_slices(self, dashboard_id):
session.merge(dash)
session.commit()
session.close()
- return "SLICES ADDED"
+ return 'SLICES ADDED'
@api
@has_access_api
- @expose("/testconn", methods=["POST", "GET"])
+ @expose('/testconn', methods=['POST', 'GET'])
def testconn(self):
"""Tests a sqla connection"""
try:
@@ -1470,7 +1470,7 @@ def testconn(self):
db_engine.patch()
masked_url = database.get_password_masked_url_from_uri(uri)
- logging.info("Superset.testconn(). Masked URL: {0}".format(masked_url))
+ logging.info('Superset.testconn(). Masked URL: {0}'.format(masked_url))
configuration.update(
db_engine.get_configuration_for_impersonation(uri,
@@ -1485,7 +1485,7 @@ def testconn(self):
.get('connect_args', {}))
if configuration:
- connect_args["configuration"] = configuration
+ connect_args['configuration'] = configuration
engine = create_engine(uri, connect_args=connect_args)
engine.connect()
@@ -1493,12 +1493,12 @@ def testconn(self):
except Exception as e:
logging.exception(e)
return json_error_response((
- "Connection failed!\n\n"
- "The error message returned was:\n{}").format(e))
+ 'Connection failed!\n\n'
+ 'The error message returned was:\n{}').format(e))
@api
@has_access_api
- @expose("/recent_activity//", methods=['GET'])
+ @expose('/recent_activity//', methods=['GET'])
def recent_activity(self, user_id):
"""Recent activity (actions) for a given user"""
M = models # noqa
@@ -1543,7 +1543,7 @@ def recent_activity(self, user_id):
@api
@has_access_api
- @expose("/csrf_token/", methods=['GET'])
+ @expose('/csrf_token/', methods=['GET'])
def csrf_token(self):
return Response(
self.render_template('superset/csrf_token.json'),
@@ -1552,7 +1552,7 @@ def csrf_token(self):
@api
@has_access_api
- @expose("/fave_dashboards_by_username//", methods=['GET'])
+ @expose('/fave_dashboards_by_username//', methods=['GET'])
def fave_dashboards_by_username(self, username):
"""This lets us use a user's username to pull favourite dashboards"""
user = sm.find_user(username=username)
@@ -1560,7 +1560,7 @@ def fave_dashboards_by_username(self, username):
@api
@has_access_api
- @expose("/fave_dashboards//", methods=['GET'])
+ @expose('/fave_dashboards//', methods=['GET'])
def fave_dashboards(self, user_id):
qry = (
db.session.query(
@@ -1599,7 +1599,7 @@ def fave_dashboards(self, user_id):
@api
@has_access_api
- @expose("/created_dashboards//", methods=['GET'])
+ @expose('/created_dashboards//', methods=['GET'])
def created_dashboards(self, user_id):
Dash = models.Dashboard # noqa
qry = (
@@ -1628,7 +1628,7 @@ def created_dashboards(self, user_id):
@api
@has_access_api
- @expose("/created_slices//", methods=['GET'])
+ @expose('/created_slices//', methods=['GET'])
def created_slices(self, user_id):
"""List of slices created by this user"""
Slice = models.Slice # noqa
@@ -1653,7 +1653,7 @@ def created_slices(self, user_id):
@api
@has_access_api
- @expose("/fave_slices//", methods=['GET'])
+ @expose('/fave_slices//', methods=['GET'])
def fave_slices(self, user_id):
"""Favorite slices for a user"""
qry = (
@@ -1692,7 +1692,7 @@ def fave_slices(self, user_id):
@api
@has_access_api
- @expose("/warm_up_cache/", methods=['GET'])
+ @expose('/warm_up_cache/', methods=['GET'])
def warm_up_cache(self):
"""Warms up the cache for the slice or table.
@@ -1706,13 +1706,13 @@ def warm_up_cache(self):
if not slice_id and not (table_name and db_name):
return json_error_response(__(
- "Malformed request. slice_id or table_name and db_name "
- "arguments are expected"), status=400)
+ 'Malformed request. slice_id or table_name and db_name '
+ 'arguments are expected'), status=400)
if slice_id:
slices = session.query(models.Slice).filter_by(id=slice_id).all()
if not slices:
return json_error_response(__(
- "Slice %(id)s not found", id=slice_id), status=404)
+ 'Slice %(id)s not found', id=slice_id), status=404)
elif table_name and db_name:
SqlaTable = ConnectorRegistry.sources['table']
table = (
@@ -1737,10 +1737,10 @@ def warm_up_cache(self):
except Exception as e:
return json_error_response(utils.error_msg_from_exception(e))
return json_success(json.dumps(
- [{"slice_id": slc.id, "slice_name": slc.slice_name}
+ [{'slice_id': slc.id, 'slice_name': slc.slice_name}
for slc in slices]))
- @expose("/favstar////")
+ @expose('/favstar////')
def favstar(self, class_name, obj_id, action):
"""Toggle favorite stars on Slices and Dashboard"""
session = db.session()
@@ -1769,7 +1769,7 @@ def favstar(self, class_name, obj_id, action):
return json_success(json.dumps({'count': count}))
@has_access
- @expose("/dashboard//")
+ @expose('/dashboard//')
def dashboard(self, dashboard_id):
"""Server side rendering for a dashboard"""
session = db.session()
@@ -1790,7 +1790,7 @@ def dashboard(self, dashboard_id):
if datasource and not self.datasource_access(datasource):
flash(
__(get_datasource_access_error_msg(datasource.name)),
- "danger")
+ 'danger')
return redirect(
'superset/request_access/?'
'dashboard_id={dash.id}&'.format(**locals()))
@@ -1805,7 +1805,7 @@ def dashboard(**kwargs): # noqa
dash_save_perm = \
dash_edit_perm and self.can_access('can_save_dash', 'Superset')
- standalone_mode = request.args.get("standalone") == "true"
+ standalone_mode = request.args.get('standalone') == 'true'
dashboard_data = dash.data
dashboard_data.update({
@@ -1822,7 +1822,7 @@ def dashboard(**kwargs): # noqa
}
return self.render_template(
- "superset/dashboard.html",
+ 'superset/dashboard.html',
entry='dashboard',
standalone_mode=standalone_mode,
title='[dashboard] ' + dash.dashboard_title,
@@ -1830,7 +1830,7 @@ def dashboard(**kwargs): # noqa
)
@has_access
- @expose("/sync_druid/", methods=['POST'])
+ @expose('/sync_druid/', methods=['POST'])
@log_this
def sync_druid_source(self):
"""Syncs the druid datasource in main db with the provided config.
@@ -1848,9 +1848,9 @@ def sync_druid_source(self):
other fields will be ignored.
Example: {
- "name": "test_click",
- "metrics_spec": [{"type": "count", "name": "count"}],
- "dimensions": ["affiliate_id", "campaign", "first_seen"]
+ 'name': 'test_click',
+ 'metrics_spec': [{'type': 'count', 'name': 'count'}],
+ 'dimensions': ['affiliate_id', 'campaign', 'first_seen']
}
"""
payload = request.get_json(force=True)
@@ -1863,7 +1863,7 @@ def sync_druid_source(self):
DruidCluster = DruidDatasource.cluster_class
if not user:
err_msg = __("Can't find User '%(name)s', please ask your admin "
- "to create one.", name=user_name)
+ 'to create one.', name=user_name)
logging.error(err_msg)
return json_error_response(err_msg)
cluster = db.session.query(DruidCluster).filter_by(
@@ -1882,7 +1882,7 @@ def sync_druid_source(self):
return Response(status=201)
@has_access
- @expose("/sqllab_viz/", methods=['POST'])
+ @expose('/sqllab_viz/', methods=['POST'])
@log_this
def sqllab_viz(self):
SqlaTable = ConnectorRegistry.sources['table']
@@ -1922,19 +1922,19 @@ def sqllab_viz(self):
if agg:
if agg == 'count_distinct':
metrics.append(SqlMetric(
- metric_name="{agg}__{column_name}".format(**locals()),
- expression="COUNT(DISTINCT {column_name})"
+ metric_name='{agg}__{column_name}'.format(**locals()),
+ expression='COUNT(DISTINCT {column_name})'
.format(**locals()),
))
else:
metrics.append(SqlMetric(
- metric_name="{agg}__{column_name}".format(**locals()),
- expression="{agg}({column_name})".format(**locals()),
+ metric_name='{agg}__{column_name}'.format(**locals()),
+ expression='{agg}({column_name})'.format(**locals()),
))
if not metrics:
metrics.append(SqlMetric(
- metric_name="count".format(**locals()),
- expression="count(*)".format(**locals()),
+ metric_name='count'.format(**locals()),
+ expression='count(*)'.format(**locals()),
))
table.columns = cols
table.metrics = metrics
@@ -1944,7 +1944,7 @@ def sqllab_viz(self):
}))
@has_access
- @expose("/table////")
+ @expose('/table////')
@log_this
def table(self, database_id, table_name, schema):
schema = utils.js_string_to_python(schema)
@@ -1973,7 +1973,7 @@ def table(self, database_id, table_name, schema):
keys += indexes
for col in t:
- dtype = ""
+ dtype = ''
try:
dtype = '{}'.format(col['type'])
except Exception:
@@ -1999,7 +1999,7 @@ def table(self, database_id, table_name, schema):
return json_success(json.dumps(tbl))
@has_access
- @expose("/extra_table_metadata////")
+ @expose('/extra_table_metadata////')
@log_this
def extra_table_metadata(self, database_id, table_name, schema):
schema = utils.js_string_to_python(schema)
@@ -2009,32 +2009,32 @@ def extra_table_metadata(self, database_id, table_name, schema):
return json_success(json.dumps(payload))
@has_access
- @expose("/select_star///")
+ @expose('/select_star///')
@log_this
def select_star(self, database_id, table_name):
mydb = db.session.query(
models.Database).filter_by(id=database_id).first()
return self.render_template(
- "superset/ajah.html",
+ 'superset/ajah.html',
content=mydb.select_star(table_name, show_cols=True),
)
- @expose("/theme/")
+ @expose('/theme/')
def theme(self):
return self.render_template('superset/theme.html')
@has_access_api
- @expose("/cached_key//")
+ @expose('/cached_key//')
@log_this
def cached_key(self, key):
"""Returns a key from the cache"""
resp = cache.get(key)
if resp:
return resp
- return "nope"
+ return 'nope'
@has_access_api
- @expose("/results//")
+ @expose('/results//')
@log_this
def results(self, key):
"""Serves a key off of the results backend"""
@@ -2044,8 +2044,8 @@ def results(self, key):
blob = results_backend.get(key)
if not blob:
return json_error_response(
- "Data could not be retrieved. "
- "You may want to re-run the query.",
+ 'Data could not be retrieved. '
+ 'You may want to re-run the query.',
status=410,
)
@@ -2065,7 +2065,7 @@ def results(self, key):
json.dumps(payload_json, default=utils.json_iso_dttm_ser))
@has_access_api
- @expose("/stop_query/", methods=['POST'])
+ @expose('/stop_query/', methods=['POST'])
@log_this
def stop_query(self):
client_id = request.form.get('client_id')
@@ -2081,7 +2081,7 @@ def stop_query(self):
return self.json_response('OK')
@has_access_api
- @expose("/sql_json/", methods=['POST', 'GET'])
+ @expose('/sql_json/', methods=['POST', 'GET'])
@log_this
def sql_json(self):
"""Runs arbitrary sql and returns and json"""
@@ -2130,12 +2130,12 @@ def sql_json(self):
query_id = query.id
session.commit() # shouldn't be necessary
if not query_id:
- raise Exception(_("Query record was not created as expected."))
- logging.info("Triggering query_id: {}".format(query_id))
+ raise Exception(_('Query record was not created as expected.'))
+ logging.info('Triggering query_id: {}'.format(query_id))
# Async request.
if async:
- logging.info("Running query on a Celery worker")
+ logging.info('Running query on a Celery worker')
# Ignore the celery future object and the request may time out.
try:
sql_lab.get_sql_results.delay(
@@ -2144,14 +2144,14 @@ def sql_json(self):
except Exception as e:
logging.exception(e)
msg = (
- "Failed to start remote query on a worker. "
- "Tell your administrator to verify the availability of "
- "the message queue."
+ 'Failed to start remote query on a worker. '
+ 'Tell your administrator to verify the availability of '
+ 'the message queue.'
)
query.status = QueryStatus.FAILED
query.error_message = msg
session.commit()
- return json_error_response("{}".format(msg))
+ return json_error_response('{}'.format(msg))
resp = json_success(json.dumps(
{'query': query.to_dict()}, default=utils.json_int_dttm_ser,
@@ -2161,10 +2161,10 @@ def sql_json(self):
# Sync request.
try:
- timeout = config.get("SQLLAB_TIMEOUT")
+ timeout = config.get('SQLLAB_TIMEOUT')
timeout_msg = (
- "The query exceeded the {timeout} seconds "
- "timeout.").format(**locals())
+ 'The query exceeded the {timeout} seconds '
+ 'timeout.').format(**locals())
with utils.timeout(seconds=timeout,
error_message=timeout_msg):
# pylint: disable=no-value-for-parameter
@@ -2172,17 +2172,17 @@ def sql_json(self):
query_id=query_id, return_results=True)
except Exception as e:
logging.exception(e)
- return json_error_response("{}".format(e))
+ return json_error_response('{}'.format(e))
if data.get('status') == QueryStatus.FAILED:
return json_error_response(payload=data)
return json_success(json.dumps(data, default=utils.json_iso_dttm_ser))
@has_access
- @expose("/csv/")
+ @expose('/csv/')
@log_this
def csv(self, client_id):
"""Download the query results as csv."""
- logging.info("Exporting CSV file [{}]".format(client_id))
+ logging.info('Exporting CSV file [{}]'.format(client_id))
query = (
db.session.query(Query)
.filter_by(client_id=client_id)
@@ -2197,19 +2197,19 @@ def csv(self, client_id):
blob = None
if results_backend and query.results_key:
logging.info(
- "Fetching CSV from results backend "
- "[{}]".format(query.results_key))
+ 'Fetching CSV from results backend '
+ '[{}]'.format(query.results_key))
blob = results_backend.get(query.results_key)
if blob:
- logging.info("Decompressing")
+ logging.info('Decompressing')
json_payload = utils.zlib_decompress_to_string(blob)
obj = json.loads(json_payload)
columns = [c['name'] for c in obj['columns']]
df = pd.DataFrame.from_records(obj['data'], columns=columns)
- logging.info("Using pandas to convert to CSV")
+ logging.info('Using pandas to convert to CSV')
csv = df.to_csv(index=False, **config.get('CSV_EXPORT'))
else:
- logging.info("Running a query to turn into CSV")
+ logging.info('Running a query to turn into CSV')
sql = query.select_sql or query.executed_sql
df = query.database.get_df(sql, query.schema)
# TODO(bkyryliuk): add compression=gzip for big files.
@@ -2217,11 +2217,11 @@ def csv(self, client_id):
response = Response(csv, mimetype='text/csv')
response.headers['Content-Disposition'] = (
'attachment; filename={}.csv'.format(query.name))
- logging.info("Ready to return response")
+ logging.info('Ready to return response')
return response
@has_access
- @expose("/fetch_datasource_metadata")
+ @expose('/fetch_datasource_metadata')
@log_this
def fetch_datasource_metadata(self):
datasource_id, datasource_type = (
@@ -2237,13 +2237,13 @@ def fetch_datasource_metadata(self):
return json_error_response(DATASOURCE_ACCESS_ERR)
return json_success(json.dumps(datasource.data))
- @expose("/queries/")
+ @expose('/queries/')
def queries(self, last_updated_ms):
"""Get the updated queries."""
stats_logger.incr('queries')
if not g.user.get_id():
return json_error_response(
- "Please login to access the queries.", status=403)
+ 'Please login to access the queries.', status=403)
# Unix time, milliseconds.
last_updated_ms_int = int(float(last_updated_ms)) if last_updated_ms else 0
@@ -2264,7 +2264,7 @@ def queries(self, last_updated_ms):
json.dumps(dict_queries, default=utils.json_int_dttm_ser))
@has_access
- @expose("/search_queries")
+ @expose('/search_queries')
@log_this
def search_queries(self):
"""Search for queries."""
@@ -2312,7 +2312,7 @@ def search_queries(self):
return Response(
json.dumps(dict_queries, default=utils.json_int_dttm_ser),
status=200,
- mimetype="application/json")
+ mimetype='application/json')
@app.errorhandler(500)
def show_traceback(self):
@@ -2321,7 +2321,7 @@ def show_traceback(self):
error_msg=get_error_msg(),
), 500
- @expose("/welcome")
+ @expose('/welcome')
def welcome(self):
"""Personalized welcome page"""
if not g.user or not g.user.get_id():
@@ -2330,7 +2330,7 @@ def welcome(self):
'superset/welcome.html', entry='welcome', utils=utils)
@has_access
- @expose("/profile//")
+ @expose('/profile//')
def profile(self, username):
"""User profile page"""
if not username and g.user:
@@ -2379,7 +2379,7 @@ def profile(self, username):
)
@has_access
- @expose("/sqllab")
+ @expose('/sqllab')
def sqllab(self):
"""SQL Editor"""
d = {
@@ -2394,7 +2394,7 @@ def sqllab(self):
@api
@has_access_api
- @expose("/slice_query//")
+ @expose('/slice_query//')
def sliceQuery(self, slice_id):
"""
This method exposes an API endpoint to
@@ -2423,14 +2423,14 @@ class CssTemplateAsyncModelView(CssTemplateModelView):
list_columns = ['template_name', 'css']
-appbuilder.add_separator("Sources")
+appbuilder.add_separator('Sources')
appbuilder.add_view(
CssTemplateModelView,
- "CSS Templates",
- label=__("CSS Templates"),
- icon="fa-css3",
- category="Manage",
- category_label=__("Manage"),
+ 'CSS Templates',
+ label=__('CSS Templates'),
+ icon='fa-css3',
+ category='Manage',
+ category_label=__('Manage'),
category_icon='')
@@ -2438,22 +2438,22 @@ class CssTemplateAsyncModelView(CssTemplateModelView):
appbuilder.add_link(
'SQL Editor',
- label=_("SQL Editor"),
+ label=_('SQL Editor'),
href='/superset/sqllab',
- category_icon="fa-flask",
- icon="fa-flask",
+ category_icon='fa-flask',
+ icon='fa-flask',
category='SQL Lab',
- category_label=__("SQL Lab"),
+ category_label=__('SQL Lab'),
)
appbuilder.add_link(
'Query Search',
- label=_("Query Search"),
+ label=_('Query Search'),
href='/superset/sqllab#search',
- icon="fa-search",
- category_icon="fa-flask",
+ icon='fa-search',
+ category_icon='fa-flask',
category='SQL Lab',
- category_label=__("SQL Lab"),
+ category_label=__('SQL Lab'),
)
diff --git a/superset/views/sql_lab.py b/superset/views/sql_lab.py
index 06afb4e302425..488a36e33c54d 100644
--- a/superset/views/sql_lab.py
+++ b/superset/views/sql_lab.py
@@ -23,11 +23,11 @@ class QueryView(SupersetModelView):
appbuilder.add_view(
QueryView,
- "Queries",
- label=__("Queries"),
- category="Manage",
- category_label=__("Manage"),
- icon="fa-search")
+ 'Queries',
+ label=__('Queries'),
+ category='Manage',
+ category_label=__('Manage'),
+ icon='fa-search')
class SavedQueryView(SupersetModelView, DeleteMixin):
@@ -78,13 +78,13 @@ class SavedQueryViewApi(SavedQueryView):
appbuilder.add_link(
__('Saved Queries'),
href='/sqllab/my_queries/',
- icon="fa-save",
+ icon='fa-save',
category='SQL Lab')
class SqlLab(BaseSupersetView):
"""The base views for Superset!"""
- @expose("/my_queries/")
+ @expose('/my_queries/')
def my_queries(self):
"""Assigns a list of found users to the given role."""
return redirect(
diff --git a/superset/viz.py b/superset/viz.py
index acc90d0b36839..e1ebe742d4a0a 100644
--- a/superset/viz.py
+++ b/superset/viz.py
@@ -1,4 +1,4 @@
-"""This module contains the "Viz" objects
+"""This module contains the 'Viz' objects
These objects represent the backend of all the visualizations that
Superset can render.
@@ -41,20 +41,20 @@ class BaseViz(object):
"""All visualizations derive this base class"""
viz_type = None
- verbose_name = "Base Viz"
- credits = ""
+ verbose_name = 'Base Viz'
+ credits = ''
is_timeseries = False
default_fillna = 0
def __init__(self, datasource, form_data):
if not datasource:
- raise Exception(_("Viz is missing a datasource"))
+ raise Exception(_('Viz is missing a datasource'))
self.datasource = datasource
self.request = request
- self.viz_type = form_data.get("viz_type")
+ self.viz_type = form_data.get('viz_type')
self.form_data = form_data
- self.query = ""
+ self.query = ''
self.token = self.form_data.get(
'token', 'token_' + uuid.uuid4().hex[:8])
self.metrics = self.form_data.get('metrics') or []
@@ -84,7 +84,7 @@ def get_df(self, query_obj=None):
if not query_obj:
query_obj = self.query_obj()
- self.error_msg = ""
+ self.error_msg = ''
self.results = None
timestamp_format = None
@@ -108,11 +108,11 @@ def get_df(self, query_obj=None):
if df is None or df.empty:
self.status = utils.QueryStatus.FAILED
if not self.error_message:
- self.error_message = "No data."
+ self.error_message = 'No data.'
return pd.DataFrame()
else:
if DTTM_ALIAS in df.columns:
- if timestamp_format in ("epoch_s", "epoch_ms"):
+ if timestamp_format in ('epoch_s', 'epoch_ms'):
df[DTTM_ALIAS] = pd.to_datetime(df[DTTM_ALIAS], utc=False)
else:
df[DTTM_ALIAS] = pd.to_datetime(
@@ -127,9 +127,9 @@ def get_df(self, query_obj=None):
def query_obj(self):
"""Building a query object"""
form_data = self.form_data
- gb = form_data.get("groupby") or []
- metrics = form_data.get("metrics") or []
- columns = form_data.get("columns") or []
+ gb = form_data.get('groupby') or []
+ metrics = form_data.get('metrics') or []
+ columns = form_data.get('columns') or []
groupby = []
for o in gb + columns:
if o not in groupby:
@@ -144,18 +144,18 @@ def query_obj(self):
merge_extra_filters(form_data)
granularity = (
- form_data.get("granularity") or
- form_data.get("granularity_sqla")
+ form_data.get('granularity') or
+ form_data.get('granularity_sqla')
)
- limit = int(form_data.get("limit") or 0)
- timeseries_limit_metric = form_data.get("timeseries_limit_metric")
- row_limit = int(form_data.get("row_limit") or config.get("ROW_LIMIT"))
+ limit = int(form_data.get('limit') or 0)
+ timeseries_limit_metric = form_data.get('timeseries_limit_metric')
+ row_limit = int(form_data.get('row_limit') or config.get('ROW_LIMIT'))
# default order direction
- order_desc = form_data.get("order_desc", True)
+ order_desc = form_data.get('order_desc', True)
- since = form_data.get("since", "")
- until = form_data.get("until", "now")
+ since = form_data.get('since', '')
+ until = form_data.get('until', 'now')
# Backward compatibility hack
if since:
@@ -168,20 +168,20 @@ def query_obj(self):
to_dttm = utils.parse_human_datetime(until)
if from_dttm and to_dttm and from_dttm > to_dttm:
- raise Exception(_("From date cannot be larger than to date"))
+ raise Exception(_('From date cannot be larger than to date'))
self.from_dttm = from_dttm
self.to_dttm = to_dttm
- self.annotation_layers = form_data.get("annotation_layers") or []
+ self.annotation_layers = form_data.get('annotation_layers') or []
# extras are used to query elements specific to a datasource type
# for instance the extra where clause that applies only to Tables
extras = {
- 'where': form_data.get("where", ''),
- 'having': form_data.get("having", ''),
+ 'where': form_data.get('where', ''),
+ 'having': form_data.get('having', ''),
'having_druid': form_data.get('having_filters', []),
- 'time_grain_sqla': form_data.get("time_grain_sqla", ''),
- 'druid_time_origin': form_data.get("druid_time_origin", ''),
+ 'time_grain_sqla': form_data.get('time_grain_sqla', ''),
+ 'druid_time_origin': form_data.get('druid_time_origin', ''),
}
filters = form_data.get('filters', [])
d = {
@@ -211,7 +211,7 @@ def cache_timeout(self):
hasattr(self.datasource, 'database') and
self.datasource.database.cache_timeout):
return self.datasource.database.cache_timeout
- return config.get("CACHE_DEFAULT_TIMEOUT")
+ return config.get('CACHE_DEFAULT_TIMEOUT')
def get_json(self, force=False):
return json.dumps(
@@ -256,11 +256,11 @@ def get_payload(self, force=False):
cached_data = cached_data.decode('utf-8')
payload = json.loads(cached_data)
except Exception as e:
- logging.error("Error reading cache: " +
+ logging.error('Error reading cache: ' +
utils.error_msg_from_exception(e))
payload = None
return []
- logging.info("Serving from cache")
+ logging.info('Serving from cache')
if not payload:
stats_logger.incr('loaded_from_source')
@@ -293,7 +293,7 @@ def get_payload(self, force=False):
'annotations': annotations,
}
payload['cached_dttm'] = datetime.utcnow().isoformat().split('.')[0]
- logging.info("Caching for the next {} seconds".format(
+ logging.info('Caching for the next {} seconds'.format(
cache_timeout))
data = self.json_dumps(payload)
if PY3:
@@ -307,7 +307,7 @@ def get_payload(self, force=False):
except Exception as e:
# cache.set call can fail if the backend is down or if
# the key is too large or whatever other reasons
- logging.warning("Could not cache key {}".format(cache_key))
+ logging.warning('Could not cache key {}'.format(cache_key))
logging.exception(e)
cache.delete(cache_key)
payload['is_cached'] = is_cached
@@ -344,8 +344,8 @@ class TableViz(BaseViz):
"""A basic html table that is sortable and searchable"""
- viz_type = "table"
- verbose_name = _("Table View")
+ viz_type = 'table'
+ verbose_name = _('Table View')
credits = 'a Superset original'
is_timeseries = False
@@ -358,7 +358,7 @@ def should_be_timeseries(self):
)
if fd.get('include_time') and not conditions_met:
raise Exception(_(
- "Pick a granularity in the Time section or "
+ 'Pick a granularity in the Time section or '
"uncheck 'Include Time'"))
return fd.get('include_time')
@@ -368,8 +368,8 @@ def query_obj(self):
if fd.get('all_columns') and (fd.get('groupby') or fd.get('metrics')):
raise Exception(_(
- "Choose either fields to [Group By] and [Metrics] or "
- "[Columns], not both"))
+ 'Choose either fields to [Group By] and [Metrics] or '
+ '[Columns], not both'))
sort_by = fd.get('timeseries_limit_metric')
if fd.get('all_columns'):
@@ -380,7 +380,7 @@ def query_obj(self):
elif sort_by:
if sort_by not in d['metrics']:
d['metrics'] += [sort_by]
- d['orderby'] = [(sort_by, not fd.get("order_desc", True))]
+ d['orderby'] = [(sort_by, not fd.get('order_desc', True))]
# Add all percent metrics that are not already in the list
if 'percent_metrics' in fd:
@@ -420,7 +420,7 @@ def get_data(self, df):
del df[m]
return dict(
- records=df.to_dict(orient="records"),
+ records=df.to_dict(orient='records'),
columns=list(df.columns),
)
@@ -435,8 +435,8 @@ class TimeTableViz(BaseViz):
"""A data table with rich time-series related columns"""
- viz_type = "time_table"
- verbose_name = _("Time Table View")
+ viz_type = 'time_table'
+ verbose_name = _('Time Table View')
credits = 'a Superset original'
is_timeseries = True
@@ -445,12 +445,11 @@ def query_obj(self):
fd = self.form_data
if not fd.get('metrics'):
- raise Exception(_("Pick at least one metric"))
+ raise Exception(_('Pick at least one metric'))
if fd.get('groupby') and len(fd.get('metrics')) > 1:
raise Exception(_(
- "When using 'Group By' you are limited to use "
- "a single metric"))
+ "When using 'Group By' you are limited to use a single metric"))
return d
def get_data(self, df):
@@ -477,8 +476,8 @@ class PivotTableViz(BaseViz):
"""A pivot table view, define your rows, columns and metrics"""
- viz_type = "pivot_table"
- verbose_name = _("Pivot Table")
+ viz_type = 'pivot_table'
+ verbose_name = _('Pivot Table')
credits = 'a Superset original'
is_timeseries = False
@@ -492,18 +491,18 @@ def query_obj(self):
if not groupby:
groupby = []
if not groupby:
- raise Exception(_("Please choose at least one \"Group by\" field "))
+ raise Exception(_("Please choose at least one 'Group by' field "))
if not metrics:
- raise Exception(_("Please choose at least one metric"))
+ raise Exception(_('Please choose at least one metric'))
if (
any(v in groupby for v in columns) or
any(v in columns for v in groupby)):
- raise Exception(_("'Group By' and 'Columns' can't overlap"))
+ raise Exception(_("Group By' and 'Columns' can't overlap"))
return d
def get_data(self, df):
if (
- self.form_data.get("granularity") == "all" and
+ self.form_data.get('granularity') == 'all' and
DTTM_ALIAS in df):
del df[DTTM_ALIAS]
df = df.pivot_table(
@@ -521,8 +520,8 @@ def get_data(self, df):
html=df.to_html(
na_rep='',
classes=(
- "dataframe table table-striped table-bordered "
- "table-condensed table-hover").split(" ")),
+ 'dataframe table table-striped table-bordered '
+ 'table-condensed table-hover').split(' ')),
)
@@ -530,17 +529,17 @@ class MarkupViz(BaseViz):
"""Use html or markdown to create a free form widget"""
- viz_type = "markup"
- verbose_name = _("Markup")
+ viz_type = 'markup'
+ verbose_name = _('Markup')
is_timeseries = False
def get_df(self):
return True
def get_data(self, df):
- markup_type = self.form_data.get("markup_type")
- code = self.form_data.get("code", '')
- if markup_type == "markdown":
+ markup_type = self.form_data.get('markup_type')
+ code = self.form_data.get('code', '')
+ if markup_type == 'markdown':
code = markdown(code)
return dict(html=code, theme_css=get_manifest_file('theme.css'))
@@ -549,8 +548,8 @@ class SeparatorViz(MarkupViz):
"""Use to create section headers in a dashboard, similar to `Markup`"""
- viz_type = "separator"
- verbose_name = _("Separator")
+ viz_type = 'separator'
+ verbose_name = _('Separator')
class WordCloudViz(BaseViz):
@@ -561,8 +560,8 @@ class WordCloudViz(BaseViz):
https://github.com/jasondavies/d3-cloud
"""
- viz_type = "word_cloud"
- verbose_name = _("Word Cloud")
+ viz_type = 'word_cloud'
+ verbose_name = _('Word Cloud')
is_timeseries = False
def query_obj(self):
@@ -577,31 +576,31 @@ def get_data(self, df):
df = df[[self.form_data.get('series'), self.form_data.get('metric')]]
# Labeling the columns for uniform json schema
df.columns = ['text', 'size']
- return df.to_dict(orient="records")
+ return df.to_dict(orient='records')
class TreemapViz(BaseViz):
"""Tree map visualisation for hierarchical data."""
- viz_type = "treemap"
- verbose_name = _("Treemap")
+ viz_type = 'treemap'
+ verbose_name = _('Treemap')
credits = 'd3.js'
is_timeseries = False
def _nest(self, metric, df):
nlevels = df.index.nlevels
if nlevels == 1:
- result = [{"name": n, "value": v}
+ result = [{'name': n, 'value': v}
for n, v in zip(df.index, df[metric])]
else:
- result = [{"name": l, "children": self._nest(metric, df.loc[l])}
+ result = [{'name': l, 'children': self._nest(metric, df.loc[l])}
for l in df.index.levels[0]]
return result
def get_data(self, df):
- df = df.set_index(self.form_data.get("groupby"))
- chart_data = [{"name": metric, "children": self._nest(metric, df)}
+ df = df.set_index(self.form_data.get('groupby'))
+ chart_data = [{'name': metric, 'children': self._nest(metric, df)}
for metric in df.columns]
return chart_data
@@ -610,8 +609,8 @@ class CalHeatmapViz(BaseViz):
"""Calendar heatmap."""
- viz_type = "cal_heatmap"
- verbose_name = _("Calendar Heatmap")
+ viz_type = 'cal_heatmap'
+ verbose_name = _('Calendar Heatmap')
credits = (
'cal-heatmap')
is_timeseries = True
@@ -619,38 +618,38 @@ class CalHeatmapViz(BaseViz):
def get_data(self, df):
form_data = self.form_data
- df.columns = ["timestamp", "metric"]
- timestamps = {str(obj["timestamp"].value / 10**9):
- obj.get("metric") for obj in df.to_dict("records")}
+ df.columns = ['timestamp', 'metric']
+ timestamps = {str(obj['timestamp'].value / 10**9):
+ obj.get('metric') for obj in df.to_dict('records')}
- start = utils.parse_human_datetime(form_data.get("since"))
- end = utils.parse_human_datetime(form_data.get("until"))
- domain = form_data.get("domain_granularity")
+ start = utils.parse_human_datetime(form_data.get('since'))
+ end = utils.parse_human_datetime(form_data.get('until'))
+ domain = form_data.get('domain_granularity')
diff_delta = rdelta.relativedelta(end, start)
diff_secs = (end - start).total_seconds()
- if domain == "year":
+ if domain == 'year':
range_ = diff_delta.years + 1
- elif domain == "month":
+ elif domain == 'month':
range_ = diff_delta.years * 12 + diff_delta.months + 1
- elif domain == "week":
+ elif domain == 'week':
range_ = diff_delta.years * 53 + diff_delta.weeks + 1
- elif domain == "day":
+ elif domain == 'day':
range_ = diff_secs // (24 * 60 * 60) + 1
else:
range_ = diff_secs // (60 * 60) + 1
return {
- "timestamps": timestamps,
- "start": start,
- "domain": domain,
- "subdomain": form_data.get("subdomain_granularity"),
- "range": range_,
+ 'timestamps': timestamps,
+ 'start': start,
+ 'domain': domain,
+ 'subdomain': form_data.get('subdomain_granularity'),
+ 'range': range_,
}
def query_obj(self):
qry = super(CalHeatmapViz, self).query_obj()
- qry["metrics"] = [self.form_data["metric"]]
+ qry['metrics'] = [self.form_data['metric']]
return qry
@@ -660,7 +659,7 @@ class NVD3Viz(BaseViz):
credits = 'NVD3.org'
viz_type = None
- verbose_name = "Base NVD3 Viz"
+ verbose_name = 'Base NVD3 Viz'
is_timeseries = False
@@ -668,31 +667,31 @@ class BoxPlotViz(NVD3Viz):
"""Box plot viz from ND3"""
- viz_type = "box_plot"
- verbose_name = _("Box Plot")
+ viz_type = 'box_plot'
+ verbose_name = _('Box Plot')
sort_series = False
is_timeseries = True
def to_series(self, df, classed='', title_suffix=''):
- label_sep = " - "
+ label_sep = ' - '
chart_data = []
- for index_value, row in zip(df.index, df.to_dict(orient="records")):
+ for index_value, row in zip(df.index, df.to_dict(orient='records')):
if isinstance(index_value, tuple):
index_value = label_sep.join(index_value)
boxes = defaultdict(dict)
for (label, key), value in row.items():
- if key == "median":
- key = "Q2"
+ if key == 'median':
+ key = 'Q2'
boxes[label][key] = value
for label, box in boxes.items():
- if len(self.form_data.get("metrics")) > 1:
+ if len(self.form_data.get('metrics')) > 1:
# need to render data labels with metrics
chart_label = label_sep.join([index_value, label])
else:
chart_label = index_value
chart_data.append({
- "label": chart_label,
- "values": box,
+ 'label': chart_label,
+ 'values': box,
})
return chart_data
@@ -708,7 +707,7 @@ def Q3(series):
return np.percentile(series, 75)
whisker_type = form_data.get('whisker_options')
- if whisker_type == "Tukey":
+ if whisker_type == 'Tukey':
def whisker_high(series):
upper_outer_lim = Q3(series) + 1.5 * (Q3(series) - Q1(series))
@@ -721,7 +720,7 @@ def whisker_low(series):
series = series[series >= lower_outer_lim]
return series[np.abs(series - lower_outer_lim).argmin()]
- elif whisker_type == "Min/max (no outliers)":
+ elif whisker_type == 'Min/max (no outliers)':
def whisker_high(series):
return series.max()
@@ -729,8 +728,8 @@ def whisker_high(series):
def whisker_low(series):
return series.min()
- elif " percentiles" in whisker_type:
- low, high = whisker_type.replace(" percentiles", "").split("/")
+ elif ' percentiles' in whisker_type:
+ low, high = whisker_type.replace(' percentiles', '').split('/')
def whisker_high(series):
return np.percentile(series, int(high))
@@ -739,7 +738,7 @@ def whisker_low(series):
return np.percentile(series, int(low))
else:
- raise ValueError("Unknown whisker type: {}".format(whisker_type))
+ raise ValueError('Unknown whisker type: {}'.format(whisker_type))
def outliers(series):
above = series[series > whisker_high(series)]
@@ -757,8 +756,8 @@ class BubbleViz(NVD3Viz):
"""Based on the NVD3 bubble chart"""
- viz_type = "bubble"
- verbose_name = _("Bubble Chart")
+ viz_type = 'bubble'
+ verbose_name = _('Bubble Chart')
is_timeseries = False
def query_obj(self):
@@ -782,7 +781,7 @@ def query_obj(self):
self.y_metric,
]
if not all(d['metrics'] + [self.entity]):
- raise Exception(_("Pick a metric for x, y and size"))
+ raise Exception(_('Pick a metric for x, y and size'))
return d
def get_data(self, df):
@@ -807,8 +806,8 @@ class BulletViz(NVD3Viz):
"""Based on the NVD3 bullet chart"""
- viz_type = "bullet"
- verbose_name = _("Bullet Chart")
+ viz_type = 'bullet'
+ verbose_name = _('Bullet Chart')
is_timeseries = False
def query_obj(self):
@@ -834,7 +833,7 @@ def as_floats(field):
self.metric,
]
if not self.metric:
- raise Exception(_("Pick a metric to display"))
+ raise Exception(_('Pick a metric to display'))
return d
def get_data(self, df):
@@ -856,8 +855,8 @@ class BigNumberViz(BaseViz):
"""Put emphasis on a single metric with this big number viz"""
- viz_type = "big_number"
- verbose_name = _("Big Number with Trendline")
+ viz_type = 'big_number'
+ verbose_name = _('Big Number with Trendline')
credits = 'a Superset original'
is_timeseries = True
@@ -865,7 +864,7 @@ def query_obj(self):
d = super(BigNumberViz, self).query_obj()
metric = self.form_data.get('metric')
if not metric:
- raise Exception(_("Pick a metric!"))
+ raise Exception(_('Pick a metric!'))
d['metrics'] = [self.form_data.get('metric')]
self.form_data['metric'] = metric
return d
@@ -873,7 +872,7 @@ def query_obj(self):
def get_data(self, df):
form_data = self.form_data
df.sort_values(by=df.columns[0], inplace=True)
- compare_lag = form_data.get("compare_lag")
+ compare_lag = form_data.get('compare_lag')
return {
'data': df.values.tolist(),
'compare_lag': compare_lag,
@@ -885,8 +884,8 @@ class BigNumberTotalViz(BaseViz):
"""Put emphasis on a single metric with this big number viz"""
- viz_type = "big_number_total"
- verbose_name = _("Big Number")
+ viz_type = 'big_number_total'
+ verbose_name = _('Big Number')
credits = 'a Superset original'
is_timeseries = False
@@ -894,7 +893,7 @@ def query_obj(self):
d = super(BigNumberTotalViz, self).query_obj()
metric = self.form_data.get('metric')
if not metric:
- raise Exception(_("Pick a metric!"))
+ raise Exception(_('Pick a metric!'))
d['metrics'] = [self.form_data.get('metric')]
self.form_data['metric'] = metric
return d
@@ -912,8 +911,8 @@ class NVD3TimeSeriesViz(NVD3Viz):
"""A rich line chart component with tons of options"""
- viz_type = "line"
- verbose_name = _("Time Series - Line Chart")
+ viz_type = 'line'
+ verbose_name = _('Time Series - Line Chart')
sort_series = False
is_timeseries = True
@@ -932,7 +931,7 @@ def to_series(self, df, classed='', title_suffix=''):
chart_data = []
for name in df.T.index.tolist():
ys = series[name]
- if df[name].dtype.kind not in "biufc":
+ if df[name].dtype.kind not in 'biufc':
continue
series_title = name
if (
@@ -947,9 +946,9 @@ def to_series(self, df, classed='', title_suffix=''):
series_title = series_title + (title_suffix,)
d = {
- "key": series_title,
- "classed": classed,
- "values": [
+ 'key': series_title,
+ 'classed': classed,
+ 'values': [
{'x': ds, 'y': ys[ds] if ds in ys else None}
for ds in df.index
],
@@ -960,8 +959,8 @@ def to_series(self, df, classed='', title_suffix=''):
def process_data(self, df, aggregate=False):
fd = self.form_data
df = df.fillna(0)
- if fd.get("granularity") == "all":
- raise Exception(_("Pick a time granularity for your time series"))
+ if fd.get('granularity') == 'all':
+ raise Exception(_('Pick a time granularity for your time series'))
if not aggregate:
df = df.pivot_table(
@@ -976,11 +975,11 @@ def process_data(self, df, aggregate=False):
fill_value=0,
aggfunc=sum)
- fm = fd.get("resample_fillmethod")
+ fm = fd.get('resample_fillmethod')
if not fm:
fm = None
- how = fd.get("resample_how")
- rule = fd.get("resample_rule")
+ how = fd.get('resample_how')
+ rule = fd.get('resample_rule')
if how and rule:
df = df.resample(rule, how=how, fill_method=fm)
if not fm:
@@ -991,13 +990,13 @@ def process_data(self, df, aggregate=False):
dfs.sort_values(ascending=False, inplace=True)
df = df[dfs.index]
- if fd.get("contribution"):
+ if fd.get('contribution'):
dft = df.T
df = (dft / dft.sum()).T
- rolling_type = fd.get("rolling_type")
- rolling_periods = int(fd.get("rolling_periods") or 0)
- min_periods = int(fd.get("min_periods") or 0)
+ rolling_type = fd.get('rolling_type')
+ rolling_periods = int(fd.get('rolling_periods') or 0)
+ min_periods = int(fd.get('min_periods') or 0)
if rolling_type in ('mean', 'std', 'sum') and rolling_periods:
kwargs = dict(
@@ -1015,7 +1014,7 @@ def process_data(self, df, aggregate=False):
if min_periods:
df = df[min_periods:]
- num_period_compare = fd.get("num_period_compare")
+ num_period_compare = fd.get('num_period_compare')
if num_period_compare:
num_period_compare = int(num_period_compare)
prt = fd.get('period_ratio_type')
@@ -1047,7 +1046,7 @@ def get_data(self, df):
df2[DTTM_ALIAS] += delta
df2 = self.process_data(df2)
chart_data += self.to_series(
- df2, classed='superset', title_suffix="---")
+ df2, classed='superset', title_suffix='---')
chart_data = sorted(chart_data, key=lambda x: x['key'])
return chart_data
@@ -1056,8 +1055,8 @@ class NVD3DualLineViz(NVD3Viz):
"""A rich line chart with dual axis"""
- viz_type = "dual_line"
- verbose_name = _("Time Series - Dual Axis Line Chart")
+ viz_type = 'dual_line'
+ verbose_name = _('Time Series - Dual Axis Line Chart')
sort_series = False
is_timeseries = True
@@ -1067,12 +1066,12 @@ def query_obj(self):
m2 = self.form_data.get('metric_2')
d['metrics'] = [m1, m2]
if not m1:
- raise Exception(_("Pick a metric for left axis!"))
+ raise Exception(_('Pick a metric for left axis!'))
if not m2:
- raise Exception(_("Pick a metric for right axis!"))
+ raise Exception(_('Pick a metric for right axis!'))
if m1 == m2:
- raise Exception(_("Please choose different metrics"
- " on left and right axis"))
+ raise Exception(_('Please choose different metrics'
+ ' on left and right axis'))
return d
def to_series(self, df, classed=''):
@@ -1093,18 +1092,18 @@ def to_series(self, df, classed=''):
]
for i, m in enumerate(metrics):
ys = series[m]
- if df[m].dtype.kind not in "biufc":
+ if df[m].dtype.kind not in 'biufc':
continue
series_title = m
d = {
- "key": series_title,
- "classed": classed,
- "values": [
+ 'key': series_title,
+ 'classed': classed,
+ 'values': [
{'x': ds, 'y': ys[ds] if ds in ys else None}
for ds in df.index
],
- "yAxis": i + 1,
- "type": "line",
+ 'yAxis': i + 1,
+ 'type': 'line',
}
chart_data.append(d)
return chart_data
@@ -1113,8 +1112,8 @@ def get_data(self, df):
fd = self.form_data
df = df.fillna(0)
- if self.form_data.get("granularity") == "all":
- raise Exception(_("Pick a time granularity for your time series"))
+ if self.form_data.get('granularity') == 'all':
+ raise Exception(_('Pick a time granularity for your time series'))
metric = fd.get('metric')
metric_2 = fd.get('metric_2')
@@ -1130,9 +1129,9 @@ class NVD3TimeSeriesBarViz(NVD3TimeSeriesViz):
"""A bar chart where the x axis is time"""
- viz_type = "bar"
+ viz_type = 'bar'
sort_series = True
- verbose_name = _("Time Series - Bar Chart")
+ verbose_name = _('Time Series - Bar Chart')
class NVD3CompareTimeSeriesViz(NVD3TimeSeriesViz):
@@ -1140,15 +1139,15 @@ class NVD3CompareTimeSeriesViz(NVD3TimeSeriesViz):
"""A line chart component where you can compare the % change over time"""
viz_type = 'compare'
- verbose_name = _("Time Series - Percent Change")
+ verbose_name = _('Time Series - Percent Change')
class NVD3TimeSeriesStackedViz(NVD3TimeSeriesViz):
"""A rich stack area chart"""
- viz_type = "area"
- verbose_name = _("Time Series - Stacked")
+ viz_type = 'area'
+ verbose_name = _('Time Series - Stacked')
sort_series = True
@@ -1156,8 +1155,8 @@ class DistributionPieViz(NVD3Viz):
"""Annoy visualization snobs with this controversial pie chart"""
- viz_type = "pie"
- verbose_name = _("Distribution - NVD3 - Pie Chart")
+ viz_type = 'pie'
+ verbose_name = _('Distribution - NVD3 - Pie Chart')
is_timeseries = False
def get_data(self, df):
@@ -1167,15 +1166,15 @@ def get_data(self, df):
df.sort_values(by=self.metrics[0], ascending=False, inplace=True)
df = df.reset_index()
df.columns = ['x', 'y']
- return df.to_dict(orient="records")
+ return df.to_dict(orient='records')
class HistogramViz(BaseViz):
"""Histogram"""
- viz_type = "histogram"
- verbose_name = _("Histogram")
+ viz_type = 'histogram'
+ verbose_name = _('Histogram')
is_timeseries = False
def query_obj(self):
@@ -1185,7 +1184,7 @@ def query_obj(self):
'row_limit', int(config.get('VIZ_ROW_LIMIT')))
numeric_column = self.form_data.get('all_columns_x')
if numeric_column is None:
- raise Exception(_("Must have one numeric column specified"))
+ raise Exception(_('Must have one numeric column specified'))
d['columns'] = [numeric_column]
return d
@@ -1199,8 +1198,8 @@ class DistributionBarViz(DistributionPieViz):
"""A good old bar chart"""
- viz_type = "dist_bar"
- verbose_name = _("Distribution - Bar Chart")
+ viz_type = 'dist_bar'
+ verbose_name = _('Distribution - Bar Chart')
is_timeseries = False
def query_obj(self):
@@ -1213,9 +1212,9 @@ def query_obj(self):
raise Exception(
_("Can't have overlap between Series and Breakdowns"))
if not fd.get('metrics'):
- raise Exception(_("Pick at least one metric"))
+ raise Exception(_('Pick at least one metric'))
if not fd.get('groupby'):
- raise Exception(_("Pick at least one field for [Series]"))
+ raise Exception(_('Pick at least one field for [Series]'))
return d
def get_data(self, df):
@@ -1228,22 +1227,22 @@ def get_data(self, df):
index=self.groupby,
columns=columns,
values=self.metrics)
- if fd.get("contribution"):
+ if fd.get('contribution'):
pt = pt.fillna(0)
pt = pt.T
pt = (pt / pt.sum()).T
pt = pt.reindex(row.index)
chart_data = []
for name, ys in pt.iteritems():
- if pt[name].dtype.kind not in "biufc" or name in self.groupby:
+ if pt[name].dtype.kind not in 'biufc' or name in self.groupby:
continue
if isinstance(name, string_types):
series_title = name
elif len(self.metrics) > 1:
- series_title = ", ".join(name)
+ series_title = ', '.join(name)
else:
l = [str(s) for s in name[1:]] # noqa: E741
- series_title = ", ".join(l)
+ series_title = ', '.join(l)
values = []
for i, v in ys.iteritems():
x = i
@@ -1256,8 +1255,8 @@ def get_data(self, df):
'y': v,
})
d = {
- "key": series_title,
- "values": values,
+ 'key': series_title,
+ 'values': values,
}
chart_data.append(d)
return chart_data
@@ -1267,8 +1266,8 @@ class SunburstViz(BaseViz):
"""A multi level sunburst chart"""
- viz_type = "sunburst"
- verbose_name = _("Sunburst")
+ viz_type = 'sunburst'
+ verbose_name = _('Sunburst')
is_timeseries = False
credits = (
'Kerry Rodden '
@@ -1287,7 +1286,7 @@ def get_data(self, df):
cols += [
self.form_data['metric'], self.form_data['secondary_metric']]
ndf = df[cols]
- return json.loads(ndf.to_json(orient="values")) # TODO fix this nonsense
+ return json.loads(ndf.to_json(orient='values')) # TODO fix this nonsense
def query_obj(self):
qry = super(SunburstViz, self).query_obj()
@@ -1300,15 +1299,15 @@ class SankeyViz(BaseViz):
"""A Sankey diagram that requires a parent-child dataset"""
- viz_type = "sankey"
- verbose_name = _("Sankey")
+ viz_type = 'sankey'
+ verbose_name = _('Sankey')
is_timeseries = False
credits = 'd3-sankey on npm'
def query_obj(self):
qry = super(SankeyViz, self).query_obj()
if len(qry['groupby']) != 2:
- raise Exception(_("Pick exactly 2 columns as [Source / Target]"))
+ raise Exception(_('Pick exactly 2 columns as [Source / Target]'))
qry['metrics'] = [
self.form_data['metric']]
return qry
@@ -1349,8 +1348,8 @@ class DirectedForceViz(BaseViz):
"""An animated directed force layout graph visualization"""
- viz_type = "directed_force"
- verbose_name = _("Directed Force Layout")
+ viz_type = 'directed_force'
+ verbose_name = _('Directed Force Layout')
credits = 'd3noob @bl.ocks.org'
is_timeseries = False
@@ -1370,8 +1369,8 @@ class ChordViz(BaseViz):
"""A Chord diagram"""
- viz_type = "chord"
- verbose_name = _("Directed Force Layout")
+ viz_type = 'chord'
+ verbose_name = _('Chord Diagram')
credits = 'Bostock'
is_timeseries = False
@@ -1403,8 +1402,8 @@ class CountryMapViz(BaseViz):
"""A country centric"""
- viz_type = "country_map"
- verbose_name = _("Country Map")
+ viz_type = 'country_map'
+ verbose_name = _('Country Map')
is_timeseries = False
credits = 'From bl.ocks.org By john-guerra'
@@ -1431,8 +1430,8 @@ class WorldMapViz(BaseViz):
"""A country centric world map"""
- viz_type = "world_map"
- verbose_name = _("World Map")
+ viz_type = 'world_map'
+ verbose_name = _('World Map')
is_timeseries = False
credits = 'datamaps on npm'
@@ -1473,7 +1472,7 @@ def get_data(self, df):
row['longitude'] = country['lng']
row['name'] = country['name']
else:
- row['country'] = "XXX"
+ row['country'] = 'XXX'
return d
@@ -1481,8 +1480,8 @@ class FilterBoxViz(BaseViz):
"""A multi filter, multi-choice filter box to make dashboards interactive"""
- viz_type = "filter_box"
- verbose_name = _("Filters")
+ viz_type = 'filter_box'
+ verbose_name = _('Filters')
is_timeseries = False
credits = 'a Superset original'
@@ -1490,7 +1489,7 @@ def query_obj(self):
qry = super(FilterBoxViz, self).query_obj()
groupby = self.form_data.get('groupby')
if len(groupby) < 1 and not self.form_data.get('date_filter'):
- raise Exception(_("Pick at least one filter field"))
+ raise Exception(_('Pick at least one filter field'))
qry['metrics'] = [
self.form_data['metric']]
return qry
@@ -1516,8 +1515,8 @@ class IFrameViz(BaseViz):
"""You can squeeze just about anything in this iFrame component"""
- viz_type = "iframe"
- verbose_name = _("iFrame")
+ viz_type = 'iframe'
+ verbose_name = _('iFrame')
credits = 'a Superset original'
is_timeseries = False
@@ -1533,11 +1532,11 @@ class ParallelCoordinatesViz(BaseViz):
https://github.com/syntagmatic/parallel-coordinates
"""
- viz_type = "para"
- verbose_name = _("Parallel Coordinates")
+ viz_type = 'para'
+ verbose_name = _('Parallel Coordinates')
credits = (
'<a href="https://github.com/syntagmatic/parallel-coordinates">'
- 'Syntagmatic\'s library</a>')
+ "Syntagmatic's library</a>")
is_timeseries = False
def query_obj(self):
@@ -1551,15 +1550,15 @@ def query_obj(self):
return d
def get_data(self, df):
- return df.to_dict(orient="records")
+ return df.to_dict(orient='records')
class HeatmapViz(BaseViz):
"""A nice heatmap visualization that support high density through canvas"""
- viz_type = "heatmap"
- verbose_name = _("Heatmap")
+ viz_type = 'heatmap'
+ verbose_name = _('Heatmap')
is_timeseries = False
credits = (
'inspired from mbostock @'
@@ -1605,7 +1604,7 @@ def get_data(self, df):
if overall:
df['perc'] = (df.v - min_) / (max_ - min_)
return {
- 'records': df.to_dict(orient="records"),
+ 'records': df.to_dict(orient='records'),
'extents': [min_, max_],
}
@@ -1617,8 +1616,8 @@ class HorizonViz(NVD3TimeSeriesViz):
https://www.npmjs.com/package/d3-horizon-chart
"""
- viz_type = "horizon"
- verbose_name = _("Horizon Charts")
+ viz_type = 'horizon'
+ verbose_name = _('Horizon Charts')
credits = (
'<a href="https://www.npmjs.com/package/d3-horizon-chart">'
'd3-horizon-chart</a>')
@@ -1628,8 +1627,8 @@ class MapboxViz(BaseViz):
"""Rich maps made with Mapbox"""
- viz_type = "mapbox"
- verbose_name = _("Mapbox")
+ viz_type = 'mapbox'
+ verbose_name = _('Mapbox')
is_timeseries = False
credits = (
'Mapbox GL JS')
@@ -1643,7 +1642,7 @@ def query_obj(self):
d['columns'] = [fd.get('all_columns_x'), fd.get('all_columns_y')]
if label_col and len(label_col) >= 1:
- if label_col[0] == "count":
+ if label_col[0] == 'count':
raise Exception(_(
"Must have a [Group By] column to have 'count' as the [Label]"))
d['columns'].append(label_col[0])
@@ -1655,20 +1654,20 @@ def query_obj(self):
else:
# Ensuring columns chosen are all in group by
if (label_col and len(label_col) >= 1 and
- label_col[0] != "count" and
+ label_col[0] != 'count' and
label_col[0] not in fd.get('groupby')):
raise Exception(_(
- "Choice of [Label] must be present in [Group By]"))
+ 'Choice of [Label] must be present in [Group By]'))
- if (fd.get("point_radius") != "Auto" and
- fd.get("point_radius") not in fd.get('groupby')):
+ if (fd.get('point_radius') != 'Auto' and
+ fd.get('point_radius') not in fd.get('groupby')):
raise Exception(_(
- "Choice of [Point Radius] must be present in [Group By]"))
+ 'Choice of [Point Radius] must be present in [Group By]'))
if (fd.get('all_columns_x') not in fd.get('groupby') or
fd.get('all_columns_y') not in fd.get('groupby')):
raise Exception(_(
- "[Longitude] and [Latitude] columns must be present in [Group By]"))
+ '[Longitude] and [Latitude] columns must be present in [Group By]'))
return d
def get_data(self, df):
@@ -1685,22 +1684,22 @@ def get_data(self, df):
metric_col = df[label_col[0]]
point_radius_col = (
[None] * len(df.index)
- if fd.get("point_radius") == "Auto"
- else df[fd.get("point_radius")])
+ if fd.get('point_radius') == 'Auto'
+ else df[fd.get('point_radius')])
# using geoJSON formatting
geo_json = {
- "type": "FeatureCollection",
- "features": [
+ 'type': 'FeatureCollection',
+ 'features': [
{
- "type": "Feature",
- "properties": {
- "metric": metric,
- "radius": point_radius,
+ 'type': 'Feature',
+ 'properties': {
+ 'metric': metric,
+ 'radius': point_radius,
},
- "geometry": {
- "type": "Point",
- "coordinates": [lon, lat],
+ 'geometry': {
+ 'type': 'Point',
+ 'coordinates': [lon, lat],
},
}
for lon, lat, metric, point_radius
@@ -1712,28 +1711,28 @@ def get_data(self, df):
}
return {
- "geoJSON": geo_json,
- "customMetric": custom_metric,
- "mapboxApiKey": config.get('MAPBOX_API_KEY'),
- "mapStyle": fd.get("mapbox_style"),
- "aggregatorName": fd.get("pandas_aggfunc"),
- "clusteringRadius": fd.get("clustering_radius"),
- "pointRadiusUnit": fd.get("point_radius_unit"),
- "globalOpacity": fd.get("global_opacity"),
- "viewportLongitude": fd.get("viewport_longitude"),
- "viewportLatitude": fd.get("viewport_latitude"),
- "viewportZoom": fd.get("viewport_zoom"),
- "renderWhileDragging": fd.get("render_while_dragging"),
- "tooltip": fd.get("rich_tooltip"),
- "color": fd.get("mapbox_color"),
+ 'geoJSON': geo_json,
+ 'customMetric': custom_metric,
+ 'mapboxApiKey': config.get('MAPBOX_API_KEY'),
+ 'mapStyle': fd.get('mapbox_style'),
+ 'aggregatorName': fd.get('pandas_aggfunc'),
+ 'clusteringRadius': fd.get('clustering_radius'),
+ 'pointRadiusUnit': fd.get('point_radius_unit'),
+ 'globalOpacity': fd.get('global_opacity'),
+ 'viewportLongitude': fd.get('viewport_longitude'),
+ 'viewportLatitude': fd.get('viewport_latitude'),
+ 'viewportZoom': fd.get('viewport_zoom'),
+ 'renderWhileDragging': fd.get('render_while_dragging'),
+ 'tooltip': fd.get('rich_tooltip'),
+ 'color': fd.get('mapbox_color'),
}
class EventFlowViz(BaseViz):
"""A visualization to explore patterns in event sequences"""
- viz_type = "event_flow"
- verbose_name = _("Event flow")
+ viz_type = 'event_flow'
+ verbose_name = _('Event flow')
credits = 'from @data-ui'
is_timeseries = True
@@ -1756,7 +1755,7 @@ def query_obj(self):
return query
def get_data(self, df):
- return df.to_dict(orient="records")
+ return df.to_dict(orient='records')
class PairedTTestViz(BaseViz):
@@ -1764,7 +1763,7 @@ class PairedTTestViz(BaseViz):
"""A table displaying paired t-test values"""
viz_type = 'paired_ttest'
- verbose_name = _("Time Series - Paired t-test")
+ verbose_name = _('Time Series - Paired t-test')
sort_series = False
is_timeseries = True
@@ -1772,7 +1771,7 @@ def get_data(self, df):
"""
Transform received data frame into an object of the form:
{
- "metric1": [
+ 'metric1': [
{
groups: ('groupA', ... ),
values: [ {x, y}, ... ],
@@ -1826,7 +1825,7 @@ class PartitionViz(NVD3TimeSeriesViz):
"""
viz_type = 'partition'
- verbose_name = _("Partition Diagram")
+ verbose_name = _('Partition Diagram')
def query_obj(self):
query_obj = super(PartitionViz, self).query_obj()
diff --git a/tests/access_tests.py b/tests/access_tests.py
index 437a75e1628b0..2f8140f290b33 100644
--- a/tests/access_tests.py
+++ b/tests/access_tests.py
@@ -446,7 +446,7 @@ def test_request_access(self):
# request access to the table
resp = self.get_resp(
ACCESS_REQUEST.format('table', table_1_id, 'go'))
- assert "Access was requested" in resp
+ assert 'Access was requested' in resp
access_request1 = self.get_access_requests('gamma', 'table', table_1_id)
assert access_request1 is not None
@@ -463,7 +463,7 @@ def test_request_access(self):
alpha_role,
sm.find_permission_view_menu('datasource_access', table3_perm))
sm.add_permission_role(
- sm.find_role("energy_usage_role"),
+ sm.find_role('energy_usage_role'),
sm.find_permission_view_menu('datasource_access', table3_perm))
session.commit()
diff --git a/tests/base_tests.py b/tests/base_tests.py
index f18dfe2fd8e71..1b213faed7a5c 100644
--- a/tests/base_tests.py
+++ b/tests/base_tests.py
@@ -19,7 +19,7 @@
os.environ['SUPERSET_CONFIG'] = 'tests.superset_test_config'
-BASE_DIR = app.config.get("BASE_DIR")
+BASE_DIR = app.config.get('BASE_DIR')
class SupersetTestCase(unittest.TestCase):
@@ -32,9 +32,9 @@ def __init__(self, *args, **kwargs):
not os.environ.get('SOLO_TEST') and
not os.environ.get('examples_loaded')
):
- logging.info("Loading examples")
+ logging.info('Loading examples')
cli.load_examples(load_test_data=True)
- logging.info("Done loading examples")
+ logging.info('Done loading examples')
sync_role_definitions()
os.environ['examples_loaded'] = '1'
else:
@@ -43,7 +43,7 @@ def __init__(self, *args, **kwargs):
self.client = app.test_client()
self.maxDiff = None
- gamma_sqllab_role = sm.add_role("gamma_sqllab")
+ gamma_sqllab_role = sm.add_role('gamma_sqllab')
for perm in sm.find_role('Gamma').permissions:
sm.add_permission_role(gamma_sqllab_role, perm)
db_perm = self.get_main_database(sm.get_session).perm
@@ -92,11 +92,11 @@ def __init__(self, *args, **kwargs):
session = db.session
cluster = (
session.query(DruidCluster)
- .filter_by(cluster_name="druid_test")
+ .filter_by(cluster_name='druid_test')
.first()
)
if not cluster:
- cluster = DruidCluster(cluster_name="druid_test")
+ cluster = DruidCluster(cluster_name='druid_test')
session.add(cluster)
session.commit()
@@ -155,7 +155,7 @@ def get_resp(
resp = self.client.get(url, follow_redirects=follow_redirects)
if raise_on_error and resp.status_code > 400:
raise Exception(
- "http request failed with code {}".format(resp.status_code))
+ 'http request failed with code {}'.format(resp.status_code))
return resp.data.decode('utf-8')
def get_json_resp(
@@ -214,7 +214,7 @@ def run_sql(self, sql, client_id, user_name=None, raise_on_error=False):
client_id=client_id),
)
if raise_on_error and 'error' in resp:
- raise Exception("run_sql failed")
+ raise Exception('run_sql failed')
return resp
def test_gamma_permissions(self):
diff --git a/tests/celery_tests.py b/tests/celery_tests.py
index b171b5259db7b..591e7939450f9 100644
--- a/tests/celery_tests.py
+++ b/tests/celery_tests.py
@@ -40,32 +40,32 @@ class UtilityFunctionTests(SupersetTestCase):
# TODO(bkyryliuk): support more cases in CTA function.
def test_create_table_as(self):
- q = SupersetQuery("SELECT * FROM outer_space;")
+ q = SupersetQuery('SELECT * FROM outer_space;')
self.assertEqual(
- "CREATE TABLE tmp AS \nSELECT * FROM outer_space",
- q.as_create_table("tmp"))
+ 'CREATE TABLE tmp AS \nSELECT * FROM outer_space',
+ q.as_create_table('tmp'))
self.assertEqual(
- "DROP TABLE IF EXISTS tmp;\n"
- "CREATE TABLE tmp AS \nSELECT * FROM outer_space",
- q.as_create_table("tmp", overwrite=True))
+ 'DROP TABLE IF EXISTS tmp;\n'
+ 'CREATE TABLE tmp AS \nSELECT * FROM outer_space',
+ q.as_create_table('tmp', overwrite=True))
# now without a semicolon
- q = SupersetQuery("SELECT * FROM outer_space")
+ q = SupersetQuery('SELECT * FROM outer_space')
self.assertEqual(
- "CREATE TABLE tmp AS \nSELECT * FROM outer_space",
- q.as_create_table("tmp"))
+ 'CREATE TABLE tmp AS \nSELECT * FROM outer_space',
+ q.as_create_table('tmp'))
# now a multi-line query
multi_line_query = (
- "SELECT * FROM planets WHERE\n"
+ 'SELECT * FROM planets WHERE\n'
"Luke_Father = 'Darth Vader'")
q = SupersetQuery(multi_line_query)
self.assertEqual(
- "CREATE TABLE tmp AS \nSELECT * FROM planets WHERE\n"
+ 'CREATE TABLE tmp AS \nSELECT * FROM planets WHERE\n'
"Luke_Father = 'Darth Vader'",
- q.as_create_table("tmp"),
+ q.as_create_table('tmp'),
)
@@ -118,8 +118,7 @@ def tearDownClass(cls):
shell=True,
)
subprocess.call(
- "ps auxww | grep 'superset worker' | awk '{print $2}' | "
- "xargs kill -9",
+ "ps auxww | grep 'superset worker' | awk '{print $2}' | xargs kill -9",
shell=True,
)
@@ -143,22 +142,22 @@ def run_sql(self, db_id, sql, client_id, cta='false', tmp_table='tmp',
def test_add_limit_to_the_query(self):
main_db = self.get_main_database(db.session)
- select_query = "SELECT * FROM outer_space;"
+ select_query = 'SELECT * FROM outer_space;'
updated_select_query = main_db.wrap_sql_limit(select_query, 100)
# Different DB engines have their own spacing while compiling
# the queries, that's why ' '.join(query.split()) is used.
# In addition some of the engines do not include OFFSET 0.
self.assertTrue(
- "SELECT * FROM (SELECT * FROM outer_space;) AS inner_qry "
- "LIMIT 100" in ' '.join(updated_select_query.split()),
+ 'SELECT * FROM (SELECT * FROM outer_space;) AS inner_qry '
+ 'LIMIT 100' in ' '.join(updated_select_query.split()),
)
- select_query_no_semicolon = "SELECT * FROM outer_space"
+ select_query_no_semicolon = 'SELECT * FROM outer_space'
updated_select_query_no_semicolon = main_db.wrap_sql_limit(
select_query_no_semicolon, 100)
self.assertTrue(
- "SELECT * FROM (SELECT * FROM outer_space) AS inner_qry "
- "LIMIT 100" in
+ 'SELECT * FROM (SELECT * FROM outer_space) AS inner_qry '
+ 'LIMIT 100' in
' '.join(updated_select_query_no_semicolon.split()),
)
@@ -167,7 +166,7 @@ def test_add_limit_to_the_query(self):
)
updated_multi_line_query = main_db.wrap_sql_limit(multi_line_query, 100)
self.assertTrue(
- "SELECT * FROM (SELECT * FROM planets WHERE "
+ 'SELECT * FROM (SELECT * FROM planets WHERE '
"Luke_Father = 'Darth Vader';) AS inner_qry LIMIT 100" in
' '.join(updated_multi_line_query.split()),
)
@@ -176,7 +175,7 @@ def test_run_sync_query_dont_exist(self):
main_db = self.get_main_database(db.session)
db_id = main_db.id
sql_dont_exist = 'SELECT name FROM table_dont_exist'
- result1 = self.run_sql(db_id, sql_dont_exist, "1", cta='true')
+ result1 = self.run_sql(db_id, sql_dont_exist, '1', cta='true')
self.assertTrue('error' in result1)
def test_run_sync_query_cta(self):
@@ -187,7 +186,7 @@ def test_run_sync_query_cta(self):
sql_where = (
"SELECT name FROM ab_permission WHERE name='{}'".format(perm_name))
result2 = self.run_sql(
- db_id, sql_where, "2", tmp_table='tmp_table_2', cta='true')
+ db_id, sql_where, '2', tmp_table='tmp_table_2', cta='true')
self.assertEqual(QueryStatus.SUCCESS, result2['query']['state'])
self.assertEqual([], result2['data'])
self.assertEqual([], result2['columns'])
@@ -203,7 +202,7 @@ def test_run_sync_query_cta_no_data(self):
db_id = main_db.id
sql_empty_result = 'SELECT * FROM ab_user WHERE id=666'
result3 = self.run_sql(
- db_id, sql_empty_result, "3", tmp_table='tmp_table_3', cta='true')
+ db_id, sql_empty_result, '3', tmp_table='tmp_table_3', cta='true')
self.assertEqual(QueryStatus.SUCCESS, result3['query']['state'])
self.assertEqual([], result3['data'])
self.assertEqual([], result3['columns'])
@@ -216,7 +215,7 @@ def test_run_async_query(self):
eng = main_db.get_sqla_engine()
sql_where = "SELECT name FROM ab_role WHERE name='Admin'"
result = self.run_sql(
- main_db.id, sql_where, "4", async='true', tmp_table='tmp_async_1',
+ main_db.id, sql_where, '4', async='true', tmp_table='tmp_async_1',
cta='true')
assert result['query']['state'] in (
QueryStatus.PENDING, QueryStatus.RUNNING, QueryStatus.SUCCESS)
@@ -228,10 +227,10 @@ def test_run_async_query(self):
self.assertEqual(QueryStatus.SUCCESS, query.status)
self.assertEqual([{'name': 'Admin'}], df.to_dict(orient='records'))
self.assertEqual(QueryStatus.SUCCESS, query.status)
- self.assertTrue("FROM tmp_async_1" in query.select_sql)
- self.assertTrue("LIMIT 666" in query.select_sql)
+ self.assertTrue('FROM tmp_async_1' in query.select_sql)
+ self.assertTrue('LIMIT 666' in query.select_sql)
self.assertEqual(
- "CREATE TABLE tmp_async_1 AS \nSELECT name FROM ab_role "
+ 'CREATE TABLE tmp_async_1 AS \nSELECT name FROM ab_role '
"WHERE name='Admin'", query.executed_sql)
self.assertEqual(sql_where, query.sql)
self.assertEqual(0, query.rows)
@@ -254,7 +253,7 @@ def dictify_list_of_dicts(cls, l, k):
def test_get_columns(self):
main_db = self.get_main_database(db.session)
- df = main_db.get_df("SELECT * FROM multiformat_time_series", None)
+ df = main_db.get_df('SELECT * FROM multiformat_time_series', None)
cdf = dataframe.SupersetDataFrame(df)
# Making ordering non-deterministic
diff --git a/tests/core_tests.py b/tests/core_tests.py
index 44da324a80208..a82106465cea7 100644
--- a/tests/core_tests.py
+++ b/tests/core_tests.py
@@ -68,7 +68,7 @@ def test_welcome(self):
def test_slice_endpoint(self):
self.login(username='admin')
- slc = self.get_slice("Girls", db.session)
+ slc = self.get_slice('Girls', db.session)
resp = self.get_resp('/superset/slice/{}/'.format(slc.id))
assert 'Time Column' in resp
assert 'List Roles' in resp
@@ -80,7 +80,7 @@ def test_slice_endpoint(self):
def test_slice_json_endpoint(self):
self.login(username='admin')
- slc = self.get_slice("Girls", db.session)
+ slc = self.get_slice('Girls', db.session)
json_endpoint = (
'/superset/explore_json/{}/{}?form_data={}'
@@ -91,7 +91,7 @@ def test_slice_json_endpoint(self):
def test_slice_csv_endpoint(self):
self.login(username='admin')
- slc = self.get_slice("Girls", db.session)
+ slc = self.get_slice('Girls', db.session)
csv_endpoint = (
'/superset/explore_json/{}/{}?csv=true&form_data={}'
@@ -129,16 +129,16 @@ def assert_admin_view_menus_in(role_name, assert_func):
def test_save_slice(self):
self.login(username='admin')
- slice_name = "Energy Sankey"
+ slice_name = 'Energy Sankey'
slice_id = self.get_slice(slice_name, db.session).id
db.session.commit()
- copy_name = "Test Sankey Save"
+ copy_name = 'Test Sankey Save'
tbl_id = self.table_ids.get('energy_usage')
- new_slice_name = "Test Sankey Overwirte"
+ new_slice_name = 'Test Sankey Overwrite'
url = (
- "/superset/explore/table/{}/?slice_name={}&"
- "action={}&datasource_name=energy_usage&form_data={}")
+ '/superset/explore/table/{}/?slice_name={}&'
+ 'action={}&datasource_name=energy_usage&form_data={}')
form_data = {
'viz_type': 'sankey',
@@ -183,17 +183,17 @@ def test_save_slice(self):
def test_filter_endpoint(self):
self.login(username='admin')
- slice_name = "Energy Sankey"
+ slice_name = 'Energy Sankey'
slice_id = self.get_slice(slice_name, db.session).id
db.session.commit()
tbl_id = self.table_ids.get('energy_usage')
table = db.session.query(SqlaTable).filter(SqlaTable.id == tbl_id)
table.filter_select_enabled = True
url = (
- "/superset/filter/table/{}/target/?viz_type=sankey&groupby=source"
- "&metric=sum__value&flt_col_0=source&flt_op_0=in&flt_eq_0=&"
- "slice_id={}&datasource_name=energy_usage&"
- "datasource_id=1&datasource_type=table")
+ '/superset/filter/table/{}/target/?viz_type=sankey&groupby=source'
+ '&metric=sum__value&flt_col_0=source&flt_op_0=in&flt_eq_0=&'
+ 'slice_id={}&datasource_name=energy_usage&'
+ 'datasource_id=1&datasource_type=table')
# Changing name
resp = self.get_resp(url.format(tbl_id, slice_id))
@@ -211,7 +211,7 @@ def test_slices(self):
(slc.slice_name, 'slice_id_url', slc.slice_id_url),
]
for name, method, url in urls:
- logging.info("[{name}]/[{method}]: {url}".format(**locals()))
+ logging.info('[{name}]/[{method}]: {url}'.format(**locals()))
self.client.get(url)
def test_tablemodelview_list(self):
@@ -250,7 +250,7 @@ def test_slices_V2(self):
(slc.slice_name, 'slice_url', slc.slice_url),
]
for name, method, url in urls:
- print("[{name}]/[{method}]: {url}".format(**locals()))
+ print('[{name}]/[{method}]: {url}'.format(**locals()))
response = self.client.get(url)
def test_dashboard(self):
@@ -266,12 +266,12 @@ def test_doctests(self):
for mod in modules:
failed, tests = doctest.testmod(mod)
if failed:
- raise Exception("Failed a doctest")
+ raise Exception('Failed a doctest')
def test_misc(self):
- assert self.get_resp('/health') == "OK"
- assert self.get_resp('/healthcheck') == "OK"
- assert self.get_resp('/ping') == "OK"
+ assert self.get_resp('/health') == 'OK'
+ assert self.get_resp('/healthcheck') == 'OK'
+ assert self.get_resp('/ping') == 'OK'
def test_testconn(self, username='admin'):
self.login(username=username)
@@ -308,12 +308,12 @@ def test_custom_password_store(self):
conn_pre = sqla.engine.url.make_url(database.sqlalchemy_uri_decrypted)
def custom_password_store(uri):
- return "password_store_test"
+ return 'password_store_test'
database.custom_password_store = custom_password_store
conn = sqla.engine.url.make_url(database.sqlalchemy_uri_decrypted)
if conn_pre.password:
- assert conn.password == "password_store_test"
+ assert conn.password == 'password_store_test'
assert conn.password != conn_pre.password
def test_databaseview_edit(self, username='admin'):
@@ -330,7 +330,7 @@ def test_databaseview_edit(self, username='admin'):
self.assertEqual(sqlalchemy_uri_decrypted, database.sqlalchemy_uri_decrypted)
def test_warm_up_cache(self):
- slc = self.get_slice("Girls", db.session)
+ slc = self.get_slice('Girls', db.session)
data = self.get_json_resp(
'/superset/warm_up_cache?slice_id={}'.format(slc.id))
@@ -343,12 +343,12 @@ def test_warm_up_cache(self):
def test_shortner(self):
self.login(username='admin')
data = (
- "//superset/explore/table/1/?viz_type=sankey&groupby=source&"
- "groupby=target&metric=sum__value&row_limit=5000&where=&having=&"
- "flt_col_0=source&flt_op_0=in&flt_eq_0=&slice_id=78&slice_name="
- "Energy+Sankey&collapsed_fieldsets=&action=&datasource_name="
- "energy_usage&datasource_id=1&datasource_type=table&"
- "previous_viz_type=sankey"
+ '//superset/explore/table/1/?viz_type=sankey&groupby=source&'
+ 'groupby=target&metric=sum__value&row_limit=5000&where=&having=&'
+ 'flt_col_0=source&flt_op_0=in&flt_eq_0=&slice_id=78&slice_name='
+ 'Energy+Sankey&collapsed_fieldsets=&action=&datasource_name='
+ 'energy_usage&datasource_id=1&datasource_type=table&'
+ 'previous_viz_type=sankey'
)
resp = self.client.post('/r/shortner/', data=data)
assert '/r/' in resp.data.decode('utf-8')
@@ -383,7 +383,7 @@ def test_kv(self):
def test_save_dash(self, username='admin'):
self.login(username=username)
dash = db.session.query(models.Dashboard).filter_by(
- slug="births").first()
+ slug='births').first()
positions = []
for i, slc in enumerate(dash.slices):
d = {
@@ -401,12 +401,12 @@ def test_save_dash(self, username='admin'):
}
url = '/superset/save_dash/{}/'.format(dash.id)
resp = self.get_resp(url, data=dict(data=json.dumps(data)))
- self.assertIn("SUCCESS", resp)
+ self.assertIn('SUCCESS', resp)
def test_save_dash_with_filter(self, username='admin'):
self.login(username=username)
dash = db.session.query(models.Dashboard).filter_by(
- slug="world_health").first()
+ slug='world_health').first()
positions = []
for i, slc in enumerate(dash.slices):
d = {
@@ -429,21 +429,21 @@ def test_save_dash_with_filter(self, username='admin'):
url = '/superset/save_dash/{}/'.format(dash.id)
resp = self.get_resp(url, data=dict(data=json.dumps(data)))
- self.assertIn("SUCCESS", resp)
+ self.assertIn('SUCCESS', resp)
updatedDash = db.session.query(models.Dashboard).filter_by(
- slug="world_health").first()
+ slug='world_health').first()
new_url = updatedDash.url
- self.assertIn("region", new_url)
+ self.assertIn('region', new_url)
resp = self.get_resp(new_url)
- self.assertIn("North America", resp)
+ self.assertIn('North America', resp)
def test_save_dash_with_dashboard_title(self, username='admin'):
self.login(username=username)
dash = (
db.session.query(models.Dashboard)
- .filter_by(slug="births")
+ .filter_by(slug='births')
.first()
)
origin_title = dash.dashboard_title
@@ -466,7 +466,7 @@ def test_save_dash_with_dashboard_title(self, username='admin'):
self.get_resp(url, data=dict(data=json.dumps(data)))
updatedDash = (
db.session.query(models.Dashboard)
- .filter_by(slug="births")
+ .filter_by(slug='births')
.first()
)
self.assertEqual(updatedDash.dashboard_title, 'new title')
@@ -477,7 +477,7 @@ def test_save_dash_with_dashboard_title(self, username='admin'):
def test_copy_dash(self, username='admin'):
self.login(username=username)
dash = db.session.query(models.Dashboard).filter_by(
- slug="births").first()
+ slug='births').first()
positions = []
for i, slc in enumerate(dash.slices):
d = {
@@ -514,37 +514,37 @@ def test_copy_dash(self, username='admin'):
def test_add_slices(self, username='admin'):
self.login(username=username)
dash = db.session.query(models.Dashboard).filter_by(
- slug="births").first()
+ slug='births').first()
new_slice = db.session.query(models.Slice).filter_by(
- slice_name="Mapbox Long/Lat").first()
+ slice_name='Mapbox Long/Lat').first()
existing_slice = db.session.query(models.Slice).filter_by(
- slice_name="Name Cloud").first()
+ slice_name='Name Cloud').first()
data = {
- "slice_ids": [new_slice.data["slice_id"],
- existing_slice.data["slice_id"]],
+ 'slice_ids': [new_slice.data['slice_id'],
+ existing_slice.data['slice_id']],
}
url = '/superset/add_slices/{}/'.format(dash.id)
resp = self.client.post(url, data=dict(data=json.dumps(data)))
- assert "SLICES ADDED" in resp.data.decode('utf-8')
+ assert 'SLICES ADDED' in resp.data.decode('utf-8')
dash = db.session.query(models.Dashboard).filter_by(
- slug="births").first()
+ slug='births').first()
new_slice = db.session.query(models.Slice).filter_by(
- slice_name="Mapbox Long/Lat").first()
+ slice_name='Mapbox Long/Lat').first()
assert new_slice in dash.slices
assert len(set(dash.slices)) == len(dash.slices)
# cleaning up
dash = db.session.query(models.Dashboard).filter_by(
- slug="births").first()
+ slug='births').first()
dash.slices = [
- o for o in dash.slices if o.slice_name != "Mapbox Long/Lat"]
+ o for o in dash.slices if o.slice_name != 'Mapbox Long/Lat']
db.session.commit()
def test_gamma(self):
self.login(username='gamma')
- assert "List Slice" in self.get_resp('/slicemodelview/list/')
- assert "List Dashboard" in self.get_resp('/dashboardmodelview/list/')
+ assert 'List Slice' in self.get_resp('/slicemodelview/list/')
+ assert 'List Dashboard' in self.get_resp('/dashboardmodelview/list/')
def test_csv_endpoint(self):
self.login('admin')
@@ -553,13 +553,13 @@ def test_csv_endpoint(self):
FROM ab_user
WHERE first_name='admin'
"""
- client_id = "{}".format(random.getrandbits(64))[:10]
+ client_id = '{}'.format(random.getrandbits(64))[:10]
self.run_sql(sql, client_id, raise_on_error=True)
resp = self.get_resp('/superset/csv/{}'.format(client_id))
data = csv.reader(io.StringIO(resp))
expected_data = csv.reader(
- io.StringIO("first_name,last_name\nadmin, user\n"))
+ io.StringIO('first_name,last_name\nadmin, user\n'))
self.assertEqual(list(expected_data), list(data))
self.logout()
@@ -587,7 +587,7 @@ def test_public_user_dashboard_access(self):
self.assertIn('birth_names', self.get_resp('/slicemodelview/list/'))
resp = self.get_resp('/dashboardmodelview/list/')
- self.assertIn("/superset/dashboard/births/", resp)
+ self.assertIn('/superset/dashboard/births/', resp)
self.assertIn('Births', self.get_resp('/superset/dashboard/births/'))
@@ -596,7 +596,7 @@ def test_public_user_dashboard_access(self):
self.assertNotIn('wb_health_population', resp)
resp = self.get_resp('/dashboardmodelview/list/')
- self.assertNotIn("/superset/dashboard/world_health/", resp)
+ self.assertNotIn('/superset/dashboard/world_health/', resp)
def test_dashboard_with_created_by_can_be_accessed_by_public_users(self):
self.logout()
@@ -609,7 +609,7 @@ def test_dashboard_with_created_by_can_be_accessed_by_public_users(self):
self.grant_public_access_to_table(table)
dash = db.session.query(models.Dashboard).filter_by(
- slug="births").first()
+ slug='births').first()
dash.owners = [appbuilder.sm.find_user('admin')]
dash.created_by = appbuilder.sm.find_user('admin')
db.session.merge(dash)
@@ -621,7 +621,7 @@ def test_only_owners_can_save(self):
dash = (
db.session
.query(models.Dashboard)
- .filter_by(slug="births")
+ .filter_by(slug='births')
.first()
)
dash.owners = []
@@ -638,7 +638,7 @@ def test_only_owners_can_save(self):
dash = (
db.session
.query(models.Dashboard)
- .filter_by(slug="births")
+ .filter_by(slug='births')
.first()
)
dash.owners = [alpha]
@@ -662,29 +662,29 @@ def test_process_template(self):
def test_get_template_kwarg(self):
maindb = self.get_main_database(db.session)
- s = "{{ foo }}"
+ s = '{{ foo }}'
tp = jinja_context.get_template_processor(database=maindb, foo='bar')
rendered = tp.process_template(s)
- self.assertEqual("bar", rendered)
+ self.assertEqual('bar', rendered)
def test_template_kwarg(self):
maindb = self.get_main_database(db.session)
- s = "{{ foo }}"
+ s = '{{ foo }}'
tp = jinja_context.get_template_processor(database=maindb)
rendered = tp.process_template(s, foo='bar')
- self.assertEqual("bar", rendered)
+ self.assertEqual('bar', rendered)
def test_templated_sql_json(self):
self.login('admin')
sql = "SELECT '{{ datetime(2017, 1, 1).isoformat() }}' as test"
- data = self.run_sql(sql, "fdaklj3ws")
- self.assertEqual(data['data'][0]['test'], "2017-01-01T00:00:00")
+ data = self.run_sql(sql, 'fdaklj3ws')
+ self.assertEqual(data['data'][0]['test'], '2017-01-01T00:00:00')
def test_table_metadata(self):
maindb = self.get_main_database(db.session)
backend = maindb.backend
data = self.get_json_resp(
- "/superset/table/{}/ab_user/null/".format(maindb.id))
+ '/superset/table/{}/ab_user/null/'.format(maindb.id))
self.assertEqual(data['name'], 'ab_user')
assert len(data['columns']) > 5
assert data.get('selectStar').startswith('SELECT')
@@ -717,7 +717,7 @@ def test_fetch_datasource_metadata(self):
def test_user_profile(self, username='admin'):
self.login(username=username)
- slc = self.get_slice("Girls", db.session)
+ slc = self.get_slice('Girls', db.session)
# Setting some faves
url = '/superset/favstar/Slice/{}/select/'.format(slc.id)
@@ -727,7 +727,7 @@ def test_user_profile(self, username='admin'):
dash = (
db.session
.query(models.Dashboard)
- .filter_by(slug="births")
+ .filter_by(slug='births')
.first()
)
url = '/superset/favstar/Dashboard/{}/select/'.format(dash.id)
@@ -760,24 +760,24 @@ def test_slice_id_is_always_logged_correctly_on_web_request(self):
def test_slice_id_is_always_logged_correctly_on_ajax_request(self):
# superset/explore_json case
- self.login(username="admin")
+ self.login(username='admin')
slc = db.session.query(models.Slice).filter_by(slice_name='Girls').one()
qry = db.session.query(models.Log).filter_by(slice_id=slc.id)
- slc_url = slc.slice_url.replace("explore", "explore_json")
+ slc_url = slc.slice_url.replace('explore', 'explore_json')
self.get_json_resp(slc_url)
self.assertEqual(1, qry.count())
def test_slice_query_endpoint(self):
# API endpoint for query string
- self.login(username="admin")
- slc = self.get_slice("Girls", db.session)
+ self.login(username='admin')
+ slc = self.get_slice('Girls', db.session)
resp = self.get_resp('/superset/slice_query/{}/'.format(slc.id))
assert 'query' in resp
assert 'language' in resp
self.logout()
def test_viz_get_fillna_for_columns(self):
- slc = self.get_slice("Girls", db.session)
+ slc = self.get_slice('Girls', db.session)
q = slc.viz.query_obj()
results = slc.viz.datasource.query(q)
fillna_columns = slc.viz.get_fillna_for_columns(results.df.columns)
diff --git a/tests/druid_tests.py b/tests/druid_tests.py
index b8250f3b6193c..76320bff256be 100644
--- a/tests/druid_tests.py
+++ b/tests/druid_tests.py
@@ -23,47 +23,47 @@ def __reduce__(self):
SEGMENT_METADATA = [{
- "id": "some_id",
- "intervals": ["2013-05-13T00:00:00.000Z/2013-05-14T00:00:00.000Z"],
- "columns": {
- "__time": {
- "type": "LONG", "hasMultipleValues": False,
- "size": 407240380, "cardinality": None, "errorMessage": None},
- "dim1": {
- "type": "STRING", "hasMultipleValues": False,
- "size": 100000, "cardinality": 1944, "errorMessage": None},
- "dim2": {
- "type": "STRING", "hasMultipleValues": True,
- "size": 100000, "cardinality": 1504, "errorMessage": None},
- "metric1": {
- "type": "FLOAT", "hasMultipleValues": False,
- "size": 100000, "cardinality": None, "errorMessage": None},
+ 'id': 'some_id',
+ 'intervals': ['2013-05-13T00:00:00.000Z/2013-05-14T00:00:00.000Z'],
+ 'columns': {
+ '__time': {
+ 'type': 'LONG', 'hasMultipleValues': False,
+ 'size': 407240380, 'cardinality': None, 'errorMessage': None},
+ 'dim1': {
+ 'type': 'STRING', 'hasMultipleValues': False,
+ 'size': 100000, 'cardinality': 1944, 'errorMessage': None},
+ 'dim2': {
+ 'type': 'STRING', 'hasMultipleValues': True,
+ 'size': 100000, 'cardinality': 1504, 'errorMessage': None},
+ 'metric1': {
+ 'type': 'FLOAT', 'hasMultipleValues': False,
+ 'size': 100000, 'cardinality': None, 'errorMessage': None},
},
- "aggregators": {
- "metric1": {
- "type": "longSum",
- "name": "metric1",
- "fieldName": "metric1"},
+ 'aggregators': {
+ 'metric1': {
+ 'type': 'longSum',
+ 'name': 'metric1',
+ 'fieldName': 'metric1'},
},
- "size": 300000,
- "numRows": 5000000,
+ 'size': 300000,
+ 'numRows': 5000000,
}]
GB_RESULT_SET = [
{
- "version": "v1",
- "timestamp": "2012-01-01T00:00:00.000Z",
- "event": {
- "dim1": 'Canada',
- "metric1": 12345678,
+ 'version': 'v1',
+ 'timestamp': '2012-01-01T00:00:00.000Z',
+ 'event': {
+ 'dim1': 'Canada',
+ 'metric1': 12345678,
},
},
{
- "version": "v1",
- "timestamp": "2012-01-01T00:00:00.000Z",
- "event": {
- "dim1": 'USA',
- "metric1": 12345678 / 2,
+ 'version': 'v1',
+ 'timestamp': '2012-01-01T00:00:00.000Z',
+ 'event': {
+ 'dim1': 'USA',
+ 'metric1': 12345678 / 2,
},
},
]
@@ -122,7 +122,7 @@ def test_client(self, PyDruid):
resp = self.get_resp('/superset/explore/druid/{}/'.format(
datasource_id))
- self.assertIn("test_datasource", resp)
+ self.assertIn('test_datasource', resp)
form_data = {
'viz_type': 'table',
'granularity': 'one+day',
@@ -141,7 +141,7 @@ def test_client(self, PyDruid):
datasource_id, json.dumps(form_data))
)
resp = self.get_json_resp(url)
- self.assertEqual("Canada", resp['data']['records'][0]['dim1'])
+ self.assertEqual('Canada', resp['data']['records'][0]['dim1'])
form_data = {
'viz_type': 'table',
@@ -161,7 +161,7 @@ def test_client(self, PyDruid):
datasource_id, json.dumps(form_data))
)
resp = self.get_json_resp(url)
- self.assertEqual("Canada", resp['data']['records'][0]['dim1'])
+ self.assertEqual('Canada', resp['data']['records'][0]['dim1'])
def test_druid_sync_from_config(self):
CLUSTER_NAME = 'new_druid'
@@ -184,19 +184,19 @@ def test_druid_sync_from_config(self):
db.session.commit()
cfg = {
- "user": "admin",
- "cluster": CLUSTER_NAME,
- "config": {
- "name": "test_click",
- "dimensions": ["affiliate_id", "campaign", "first_seen"],
- "metrics_spec": [{"type": "count", "name": "count"},
- {"type": "sum", "name": "sum"}],
- "batch_ingestion": {
- "sql": "SELECT * FROM clicks WHERE d='{{ ds }}'",
- "ts_column": "d",
- "sources": [{
- "table": "clicks",
- "partition": "d='{{ ds }}'",
+ 'user': 'admin',
+ 'cluster': CLUSTER_NAME,
+ 'config': {
+ 'name': 'test_click',
+ 'dimensions': ['affiliate_id', 'campaign', 'first_seen'],
+ 'metrics_spec': [{'type': 'count', 'name': 'count'},
+ {'type': 'sum', 'name': 'sum'}],
+ 'batch_ingestion': {
+ 'sql': "SELECT * FROM clicks WHERE d='{{ ds }}'",
+ 'ts_column': 'd',
+ 'sources': [{
+ 'table': 'clicks',
+ 'partition': "d='{{ ds }}'",
}],
},
},
@@ -207,13 +207,13 @@ def check():
druid_ds = (
db.session
.query(DruidDatasource)
- .filter_by(datasource_name="test_click")
+ .filter_by(datasource_name='test_click')
.one()
)
col_names = set([c.column_name for c in druid_ds.columns])
- assert {"affiliate_id", "campaign", "first_seen"} == col_names
+ assert {'affiliate_id', 'campaign', 'first_seen'} == col_names
metric_names = {m.metric_name for m in druid_ds.metrics}
- assert {"count", "sum"} == metric_names
+ assert {'count', 'sum'} == metric_names
assert resp.status_code == 201
check()
@@ -222,29 +222,29 @@ def check():
# datasource exists, add new metrics and dimensions
cfg = {
- "user": "admin",
- "cluster": CLUSTER_NAME,
- "config": {
- "name": "test_click",
- "dimensions": ["affiliate_id", "second_seen"],
- "metrics_spec": [
- {"type": "bla", "name": "sum"},
- {"type": "unique", "name": "unique"},
+ 'user': 'admin',
+ 'cluster': CLUSTER_NAME,
+ 'config': {
+ 'name': 'test_click',
+ 'dimensions': ['affiliate_id', 'second_seen'],
+ 'metrics_spec': [
+ {'type': 'bla', 'name': 'sum'},
+ {'type': 'unique', 'name': 'unique'},
],
},
}
resp = self.client.post('/superset/sync_druid/', data=json.dumps(cfg))
druid_ds = db.session.query(DruidDatasource).filter_by(
- datasource_name="test_click").one()
+ datasource_name='test_click').one()
# columns and metrics are not deleted if config is changed as
# user could define his own dimensions / metrics and want to keep them
assert set([c.column_name for c in druid_ds.columns]) == set(
- ["affiliate_id", "campaign", "first_seen", "second_seen"])
+ ['affiliate_id', 'campaign', 'first_seen', 'second_seen'])
assert set([m.metric_name for m in druid_ds.metrics]) == set(
- ["count", "sum", "unique"])
+ ['count', 'sum', 'unique'])
# metric type will not be overridden, sum stays instead of bla
assert set([m.metric_type for m in druid_ds.metrics]) == set(
- ["longSum", "sum", "unique"])
+ ['longSum', 'sum', 'unique'])
assert resp.status_code == 201
def test_filter_druid_datasource(self):
@@ -322,7 +322,7 @@ def test_sync_druid_perm(self, PyDruid):
view_menu_name = cluster.datasources[0].get_perm()
view_menu = sm.find_view_menu(view_menu_name)
- permission = sm.find_permission("datasource_access")
+ permission = sm.find_permission('datasource_access')
pv = sm.get_session.query(sm.permissionview_model).filter_by(
permission=permission, view_menu=view_menu).first()
@@ -511,7 +511,7 @@ def test_get_filters_handles_none_for_string_types(self):
self.assertEqual('', res.filter['filter']['value'])
def test_get_filters_extracts_values_in_quotes(self):
- filtr = {'col': 'A', 'op': 'in', 'val': [" 'a' "]}
+ filtr = {'col': 'A', 'op': 'in', 'val': [' "a" ']}
res = DruidDatasource.get_filters([filtr], [])
self.assertEqual('a', res.filter['filter']['value'])
diff --git a/tests/import_export_tests.py b/tests/import_export_tests.py
index 8bc706a06ca11..e945630f36986 100644
--- a/tests/import_export_tests.py
+++ b/tests/import_export_tests.py
@@ -60,9 +60,9 @@ def create_slice(self, name, ds_id=None, id=None, db_name='main',
'database_name': db_name,
'schema': '',
# Test for trailing commas
- "metrics": [
- "sum__signup_attempt_email",
- "sum__signup_attempt_facebook",
+ 'metrics': [
+ 'sum__signup_attempt_email',
+ 'sum__signup_attempt_facebook',
],
}
@@ -319,7 +319,7 @@ def test_import_dashboard_1_slice(self):
make_transient(expected_dash)
self.assert_dash_equals(
expected_dash, imported_dash, check_position=False)
- self.assertEquals({"remote_id": 10002, "import_time": 1990},
+ self.assertEquals({'remote_id': 10002, 'import_time': 1990},
json.loads(imported_dash.json_metadata))
expected_position = dash_with_1_slice.position_array
@@ -333,11 +333,11 @@ def test_import_dashboard_2_slices(self):
dash_with_2_slices = self.create_dashboard(
'dash_with_2_slices', slcs=[e_slc, b_slc], id=10003)
dash_with_2_slices.json_metadata = json.dumps({
- "remote_id": 10003,
- "filter_immune_slices": ["{}".format(e_slc.id)],
- "expanded_slices": {
- "{}".format(e_slc.id): True,
- "{}".format(b_slc.id): False,
+ 'remote_id': 10003,
+ 'filter_immune_slices': ['{}'.format(e_slc.id)],
+ 'expanded_slices': {
+ '{}'.format(e_slc.id): True,
+ '{}'.format(b_slc.id): False,
},
})
@@ -353,10 +353,10 @@ def test_import_dashboard_2_slices(self):
i_e_slc = self.get_slice_by_name('e_slc')
i_b_slc = self.get_slice_by_name('b_slc')
expected_json_metadata = {
- "remote_id": 10003,
- "import_time": 1991,
- "filter_immune_slices": ["{}".format(i_e_slc.id)],
- "expanded_slices": {
+ 'remote_id': 10003,
+ 'import_time': 1991,
+ 'filter_immune_slices': ['{}'.format(i_e_slc.id)],
+ 'expanded_slices': {
'{}'.format(i_e_slc.id): True,
'{}'.format(i_b_slc.id): False,
},
@@ -391,7 +391,7 @@ def test_import_override_dashboard_2_slices(self):
imported_dash = self.get_dash(imported_dash_id_2)
self.assert_dash_equals(
expected_dash, imported_dash, check_position=False)
- self.assertEquals({"remote_id": 10004, "import_time": 1992},
+ self.assertEquals({'remote_id': 10004, 'import_time': 1992},
json.loads(imported_dash.json_metadata))
def test_import_table_no_metadata(self):
@@ -403,7 +403,7 @@ def test_import_table_no_metadata(self):
def test_import_table_1_col_1_met(self):
table = self.create_table(
'table_1_col_1_met', id=10002,
- cols_names=["col1"], metric_names=["metric1"])
+ cols_names=['col1'], metric_names=['metric1'])
imported_id = SqlaTable.import_obj(table, import_time=1990)
imported = self.get_table(imported_id)
self.assert_table_equals(table, imported)
@@ -464,7 +464,7 @@ def test_import_druid_no_metadata(self):
def test_import_druid_1_col_1_met(self):
datasource = self.create_druid_datasource(
'druid_1_col_1_met', id=10002,
- cols_names=["col1"], metric_names=["metric1"])
+ cols_names=['col1'], metric_names=['metric1'])
imported_id = DruidDatasource.import_obj(
datasource, import_time=1990)
imported = self.get_datasource(imported_id)
diff --git a/tests/sql_parse_tests.py b/tests/sql_parse_tests.py
index c9cc389309f28..c9368bb362a27 100644
--- a/tests/sql_parse_tests.py
+++ b/tests/sql_parse_tests.py
@@ -16,42 +16,42 @@ def extract_tables(self, query):
return sq.tables
def test_simple_select(self):
- query = "SELECT * FROM tbname"
- self.assertEquals({"tbname"}, self.extract_tables(query))
+ query = 'SELECT * FROM tbname'
+ self.assertEquals({'tbname'}, self.extract_tables(query))
# underscores
- query = "SELECT * FROM tb_name"
- self.assertEquals({"tb_name"},
+ query = 'SELECT * FROM tb_name'
+ self.assertEquals({'tb_name'},
self.extract_tables(query))
# quotes
query = 'SELECT * FROM "tbname"'
- self.assertEquals({"tbname"}, self.extract_tables(query))
+ self.assertEquals({'tbname'}, self.extract_tables(query))
# unicode encoding
query = 'SELECT * FROM "tb_name" WHERE city = "Lübeck"'
- self.assertEquals({"tb_name"}, self.extract_tables(query))
+ self.assertEquals({'tb_name'}, self.extract_tables(query))
# schema
self.assertEquals(
- {"schemaname.tbname"},
- self.extract_tables("SELECT * FROM schemaname.tbname"))
+ {'schemaname.tbname'},
+ self.extract_tables('SELECT * FROM schemaname.tbname'))
# quotes
- query = "SELECT field1, field2 FROM tb_name"
- self.assertEquals({"tb_name"}, self.extract_tables(query))
+ query = 'SELECT field1, field2 FROM tb_name'
+ self.assertEquals({'tb_name'}, self.extract_tables(query))
- query = "SELECT t1.f1, t2.f2 FROM t1, t2"
- self.assertEquals({"t1", "t2"}, self.extract_tables(query))
+ query = 'SELECT t1.f1, t2.f2 FROM t1, t2'
+ self.assertEquals({'t1', 't2'}, self.extract_tables(query))
def test_select_named_table(self):
- query = "SELECT a.date, a.field FROM left_table a LIMIT 10"
+ query = 'SELECT a.date, a.field FROM left_table a LIMIT 10'
self.assertEquals(
- {"left_table"}, self.extract_tables(query))
+ {'left_table'}, self.extract_tables(query))
def test_reverse_select(self):
- query = "FROM t1 SELECT field"
- self.assertEquals({"t1"}, self.extract_tables(query))
+ query = 'FROM t1 SELECT field'
+ self.assertEquals({'t1'}, self.extract_tables(query))
def test_subselect(self):
query = """
@@ -63,7 +63,7 @@ def test_subselect(self):
) sub, s2.t2
WHERE sub.resolution = 'NONE'
"""
- self.assertEquals({"s1.t1", "s2.t2"},
+ self.assertEquals({'s1.t1', 's2.t2'},
self.extract_tables(query))
query = """
@@ -75,7 +75,7 @@ def test_subselect(self):
) sub
WHERE sub.resolution = 'NONE'
"""
- self.assertEquals({"s1.t1"}, self.extract_tables(query))
+ self.assertEquals({'s1.t1'}, self.extract_tables(query))
query = """
SELECT * FROM t1
@@ -86,25 +86,25 @@ def test_subselect(self):
WHERE ROW(5*t2.s1,77)=
(SELECT 50,11*s1 FROM t4)));
"""
- self.assertEquals({"t1", "t2", "t3", "t4"},
+ self.assertEquals({'t1', 't2', 't3', 't4'},
self.extract_tables(query))
def test_select_in_expression(self):
- query = "SELECT f1, (SELECT count(1) FROM t2) FROM t1"
- self.assertEquals({"t1", "t2"}, self.extract_tables(query))
+ query = 'SELECT f1, (SELECT count(1) FROM t2) FROM t1'
+ self.assertEquals({'t1', 't2'}, self.extract_tables(query))
def test_union(self):
- query = "SELECT * FROM t1 UNION SELECT * FROM t2"
- self.assertEquals({"t1", "t2"}, self.extract_tables(query))
+ query = 'SELECT * FROM t1 UNION SELECT * FROM t2'
+ self.assertEquals({'t1', 't2'}, self.extract_tables(query))
- query = "SELECT * FROM t1 UNION ALL SELECT * FROM t2"
- self.assertEquals({"t1", "t2"}, self.extract_tables(query))
+ query = 'SELECT * FROM t1 UNION ALL SELECT * FROM t2'
+ self.assertEquals({'t1', 't2'}, self.extract_tables(query))
- query = "SELECT * FROM t1 INTERSECT ALL SELECT * FROM t2"
- self.assertEquals({"t1", "t2"}, self.extract_tables(query))
+ query = 'SELECT * FROM t1 INTERSECT ALL SELECT * FROM t2'
+ self.assertEquals({'t1', 't2'}, self.extract_tables(query))
def test_select_from_values(self):
- query = "SELECT * FROM VALUES (13, 42)"
+ query = 'SELECT * FROM VALUES (13, 42)'
self.assertFalse(self.extract_tables(query))
def test_select_array(self):
@@ -112,25 +112,25 @@ def test_select_array(self):
SELECT ARRAY[1, 2, 3] AS my_array
FROM t1 LIMIT 10
"""
- self.assertEquals({"t1"}, self.extract_tables(query))
+ self.assertEquals({'t1'}, self.extract_tables(query))
def test_select_if(self):
query = """
SELECT IF(CARDINALITY(my_array) >= 3, my_array[3], NULL)
FROM t1 LIMIT 10
"""
- self.assertEquals({"t1"}, self.extract_tables(query))
+ self.assertEquals({'t1'}, self.extract_tables(query))
# SHOW TABLES ((FROM | IN) qualifiedName)? (LIKE pattern=STRING)?
def test_show_tables(self):
- query = 'SHOW TABLES FROM s1 like "%order%"'
+ query = "SHOW TABLES FROM s1 like '%order%'"
# TODO: figure out what should code do here
- self.assertEquals({"s1"}, self.extract_tables(query))
+ self.assertEquals({'s1'}, self.extract_tables(query))
# SHOW COLUMNS (FROM | IN) qualifiedName
def test_show_columns(self):
- query = "SHOW COLUMNS FROM t1"
- self.assertEquals({"t1"}, self.extract_tables(query))
+ query = 'SHOW COLUMNS FROM t1'
+ self.assertEquals({'t1'}, self.extract_tables(query))
def test_where_subquery(self):
query = """
@@ -138,26 +138,26 @@ def test_where_subquery(self):
FROM t1
WHERE regionkey = (SELECT max(regionkey) FROM t2)
"""
- self.assertEquals({"t1", "t2"}, self.extract_tables(query))
+ self.assertEquals({'t1', 't2'}, self.extract_tables(query))
query = """
SELECT name
FROM t1
WHERE regionkey IN (SELECT regionkey FROM t2)
"""
- self.assertEquals({"t1", "t2"}, self.extract_tables(query))
+ self.assertEquals({'t1', 't2'}, self.extract_tables(query))
query = """
SELECT name
FROM t1
WHERE regionkey EXISTS (SELECT regionkey FROM t2)
"""
- self.assertEquals({"t1", "t2"}, self.extract_tables(query))
+ self.assertEquals({'t1', 't2'}, self.extract_tables(query))
# DESCRIBE | DESC qualifiedName
def test_describe(self):
- self.assertEquals({"t1"}, self.extract_tables("DESCRIBE t1"))
- self.assertEquals({"t1"}, self.extract_tables("DESC t1"))
+ self.assertEquals({'t1'}, self.extract_tables('DESCRIBE t1'))
+ self.assertEquals({'t1'}, self.extract_tables('DESC t1'))
# SHOW PARTITIONS FROM qualifiedName (WHERE booleanExpression)?
# (ORDER BY sortItem (',' sortItem)*)? (LIMIT limit=(INTEGER_VALUE | ALL))?
@@ -166,11 +166,11 @@ def test_show_partitions(self):
SHOW PARTITIONS FROM orders
WHERE ds >= '2013-01-01' ORDER BY ds DESC;
"""
- self.assertEquals({"orders"}, self.extract_tables(query))
+ self.assertEquals({'orders'}, self.extract_tables(query))
def test_join(self):
- query = "SELECT t1.*, t2.* FROM t1 JOIN t2 ON t1.a = t2.a;"
- self.assertEquals({"t1", "t2"}, self.extract_tables(query))
+ query = 'SELECT t1.*, t2.* FROM t1 JOIN t2 ON t1.a = t2.a;'
+ self.assertEquals({'t1', 't2'}, self.extract_tables(query))
# subquery + join
query = """
@@ -184,7 +184,7 @@ def test_join(self):
) b
ON a.date = b.date
"""
- self.assertEquals({"left_table", "right_table"},
+ self.assertEquals({'left_table', 'right_table'},
self.extract_tables(query))
query = """
@@ -198,7 +198,7 @@ def test_join(self):
) b
ON a.date = b.date
"""
- self.assertEquals({"left_table", "right_table"},
+ self.assertEquals({'left_table', 'right_table'},
self.extract_tables(query))
query = """
@@ -212,7 +212,7 @@ def test_join(self):
) b
ON a.date = b.date
"""
- self.assertEquals({"left_table", "right_table"},
+ self.assertEquals({'left_table', 'right_table'},
self.extract_tables(query))
query = """
@@ -226,7 +226,7 @@ def test_join(self):
) b
ON a.date = b.date
"""
- self.assertEquals({"left_table", "right_table"},
+ self.assertEquals({'left_table', 'right_table'},
self.extract_tables(query))
# TODO: add SEMI join support, SQL Parse does not handle it.
@@ -241,7 +241,7 @@ def test_join(self):
# ) b
# ON a.date = b.date
# """
- # self.assertEquals({"left_table", "right_table"},
+ # self.assertEquals({'left_table', 'right_table'},
# sql_parse.extract_tables(query))
def test_combinations(self):
@@ -255,14 +255,14 @@ def test_combinations(self):
WHERE ROW(5*t3.s1,77)=
(SELECT 50,11*s1 FROM t4)));
"""
- self.assertEquals({"t1", "t3", "t4", "t6"},
+ self.assertEquals({'t1', 't3', 't4', 't6'},
self.extract_tables(query))
query = """
SELECT * FROM (SELECT * FROM (SELECT * FROM (SELECT * FROM EmployeeS)
AS S1) AS S2) AS S3;
"""
- self.assertEquals({"EmployeeS"}, self.extract_tables(query))
+ self.assertEquals({'EmployeeS'}, self.extract_tables(query))
def test_with(self):
query = """
@@ -272,7 +272,7 @@ def test_with(self):
z AS (SELECT b AS c FROM t3)
SELECT c FROM z;
"""
- self.assertEquals({"t1", "t2", "t3"},
+ self.assertEquals({'t1', 't2', 't3'},
self.extract_tables(query))
query = """
@@ -282,7 +282,7 @@ def test_with(self):
z AS (SELECT b AS c FROM y)
SELECT c FROM z;
"""
- self.assertEquals({"t1"}, self.extract_tables(query))
+ self.assertEquals({'t1'}, self.extract_tables(query))
def test_reusing_aliases(self):
query = """
@@ -290,11 +290,11 @@ def test_reusing_aliases(self):
q2 as ( select key from src where key = '5')
select * from (select key from q1) a;
"""
- self.assertEquals({"src"}, self.extract_tables(query))
+ self.assertEquals({'src'}, self.extract_tables(query))
def multistatement(self):
- query = "SELECT * FROM t1; SELECT * FROM t2"
- self.assertEquals({"t1", "t2"}, self.extract_tables(query))
+ query = 'SELECT * FROM t1; SELECT * FROM t2'
+ self.assertEquals({'t1', 't2'}, self.extract_tables(query))
- query = "SELECT * FROM t1; SELECT * FROM t2;"
- self.assertEquals({"t1", "t2"}, self.extract_tables(query))
+ query = 'SELECT * FROM t1; SELECT * FROM t2;'
+ self.assertEquals({'t1', 't2'}, self.extract_tables(query))
diff --git a/tests/sqllab_tests.py b/tests/sqllab_tests.py
index c4332c982369d..2caf4c2ae16a2 100644
--- a/tests/sqllab_tests.py
+++ b/tests/sqllab_tests.py
@@ -25,15 +25,15 @@ def run_some_queries(self):
db.session.query(Query).delete()
db.session.commit()
self.run_sql(
- "SELECT * FROM ab_user",
+ 'SELECT * FROM ab_user',
client_id='client_id_1',
user_name='admin')
self.run_sql(
- "SELECT * FROM NO_TABLE",
+ 'SELECT * FROM NO_TABLE',
client_id='client_id_3',
user_name='admin')
self.run_sql(
- "SELECT * FROM ab_permission",
+ 'SELECT * FROM ab_permission',
client_id='client_id_2',
user_name='gamma_sqllab')
self.logout()
@@ -46,10 +46,10 @@ def tearDown(self):
def test_sql_json(self):
self.login('admin')
- data = self.run_sql('SELECT * FROM ab_user', "1")
+ data = self.run_sql('SELECT * FROM ab_user', '1')
self.assertLess(0, len(data['data']))
- data = self.run_sql('SELECT * FROM unexistant_table', "2")
+ data = self.run_sql('SELECT * FROM unexistant_table', '2')
self.assertLess(0, len(data['error']))
def test_sql_json_has_access(self):
@@ -64,7 +64,7 @@ def test_sql_json_has_access(self):
.filter(ab_models.Permission.name == 'database_access')
.first()
)
- astronaut = sm.add_role("Astronaut")
+ astronaut = sm.add_role('Astronaut')
sm.add_permission_role(astronaut, main_db_permission_view)
# Astronaut role is Gamma + sqllab + main db permissions
for perm in sm.find_role('Gamma').permissions:
@@ -78,7 +78,7 @@ def test_sql_json_has_access(self):
'gagarin', 'Iurii', 'Gagarin', 'gagarin@cosmos.ussr',
astronaut,
password='general')
- data = self.run_sql('SELECT * FROM ab_user', "3", user_name='gagarin')
+ data = self.run_sql('SELECT * FROM ab_user', '3', user_name='gagarin')
db.session.query(Query).delete()
db.session.commit()
self.assertLess(0, len(data['data']))
@@ -97,8 +97,8 @@ def test_queries_endpoint(self):
self.assertEquals(2, len(data))
# Run 2 more queries
- self.run_sql("SELECT * FROM ab_user LIMIT 1", client_id='client_id_4')
- self.run_sql("SELECT * FROM ab_user LIMIT 2", client_id='client_id_5')
+ self.run_sql('SELECT * FROM ab_user LIMIT 1', client_id='client_id_4')
+ self.run_sql('SELECT * FROM ab_user LIMIT 2', client_id='client_id_5')
self.login('admin')
data = self.get_json_resp('/superset/queries/0')
self.assertEquals(4, len(data))
@@ -195,7 +195,7 @@ def test_search_query_on_time(self):
def test_alias_duplicate(self):
self.run_sql(
- "SELECT username as col, id as col, username FROM ab_user",
+ 'SELECT username as col, id as col, username FROM ab_user',
client_id='2e2df3',
user_name='admin',
raise_on_error=True)
diff --git a/tests/utils_tests.py b/tests/utils_tests.py
index dc6b454a36ff9..1d5cc65fab1c8 100644
--- a/tests/utils_tests.py
+++ b/tests/utils_tests.py
@@ -24,7 +24,7 @@ def test_json_int_dttm_ser(self):
assert json_int_dttm_ser(dttm + timedelta(milliseconds=1)) == (ts + 1)
with self.assertRaises(TypeError):
- json_int_dttm_ser("this is not a date")
+ json_int_dttm_ser('this is not a date')
def test_json_iso_dttm_ser(self):
dttm = datetime(2020, 1, 1)
@@ -35,7 +35,7 @@ def test_json_iso_dttm_ser(self):
assert json_iso_dttm_ser(t) == t.isoformat()
with self.assertRaises(TypeError):
- json_iso_dttm_ser("this is not a date")
+ json_iso_dttm_ser('this is not a date')
def test_base_json_conv(self):
assert isinstance(base_json_conv(numpy.bool_(1)), bool) is True
@@ -50,7 +50,7 @@ def test_parse_human_timedelta(self, mock_now):
self.assertEquals(parse_human_timedelta('now'), timedelta(0))
def test_zlib_compression(self):
- json_str = """{"test": 1}"""
+ json_str = '{"test": 1}'
blob = zlib_compress(json_str)
got_str = zlib_decompress_to_string(blob)
self.assertEquals(json_str, got_str)
diff --git a/tox.ini b/tox.ini
index 177858dfe3971..78198ea190894 100644
--- a/tox.ini
+++ b/tox.ini
@@ -16,9 +16,6 @@ exclude =
superset/data
superset/migrations
superset/templates
-ignore =
- Q000
- Q001
import-order-style = google
max-line-length = 90