diff --git a/superset/dataframe.py b/superset/dataframe.py
index 1785fda32f6c9..9f7aa88b88b74 100644
--- a/superset/dataframe.py
+++ b/superset/dataframe.py
@@ -31,7 +31,7 @@ def data(self):
         return self.__df.to_dict(orient='records')
 
     @property
-    def columns_dict(self):
+    def columns(self):
         """Provides metadata about columns for data visualization.
 
         :return: dict, with the fields name, type, is_date, is_dim and agg.
@@ -69,7 +69,6 @@ def columns_dict(self):
             if not column['agg']:
                 column.pop('agg', None)
             columns.append(column)
-
         return columns
 
 
diff --git a/superset/sql_lab.py b/superset/sql_lab.py
index 94d26a4991b9e..41a6b2e357c75 100644
--- a/superset/sql_lab.py
+++ b/superset/sql_lab.py
@@ -155,7 +155,7 @@ def handle_error(msg):
         'query_id': query.id,
         'status': query.status,
         'data': cdf.data if cdf.data else [],
-        'columns': cdf.columns_dict if cdf.columns_dict else {},
+        'columns': cdf.columns if cdf.columns else [],
         'query': query.to_dict(),
     }
     payload = json.dumps(payload, default=utils.json_iso_dttm_ser)
diff --git a/superset/views/core.py b/superset/views/core.py
index 4813d9b3ed6d9..bc19519106664 100755
--- a/superset/views/core.py
+++ b/superset/views/core.py
@@ -2057,7 +2057,8 @@ def csv(self, client_id):
         if blob:
             json_payload = zlib.decompress(blob)
             obj = json.loads(json_payload)
-            df = pd.DataFrame.from_records(obj['data'])
+            columns = [c['name'] for c in obj['columns']]
+            df = pd.DataFrame.from_records(obj['data'], columns=columns)
             csv = df.to_csv(index=False, encoding='utf-8')
         else:
             sql = query.select_sql or query.executed_sql
diff --git a/tests/celery_tests.py b/tests/celery_tests.py
index 6a3802c7d258a..8da39be96c96e 100644
--- a/tests/celery_tests.py
+++ b/tests/celery_tests.py
@@ -188,7 +188,7 @@ def test_run_sync_query_cta(self):
             db_id, sql_where, "2", tmp_table='tmp_table_2', cta='true')
         self.assertEqual(QueryStatus.SUCCESS, result2['query']['state'])
         self.assertEqual([], result2['data'])
-        self.assertEqual({}, result2['columns'])
+        self.assertEqual([], result2['columns'])
         query2 = self.get_query_by_id(result2['query']['serverId'])
 
         # Check the data in the tmp table.
@@ -204,7 +204,7 @@ def test_run_sync_query_cta_no_data(self):
             db_id, sql_empty_result, "3", tmp_table='tmp_table_3', cta='true')
         self.assertEqual(QueryStatus.SUCCESS, result3['query']['state'])
         self.assertEqual([], result3['data'])
-        self.assertEqual({}, result3['columns'])
+        self.assertEqual([], result3['columns'])
         query3 = self.get_query_by_id(result3['query']['serverId'])
         self.assertEqual(QueryStatus.SUCCESS, query3.status)
 
@@ -238,7 +238,7 @@ def test_run_async_query(self):
         self.assertEqual(True, query.select_as_cta)
         self.assertEqual(True, query.select_as_cta_used)
 
-    def test_get_columns_dict(self):
+    def test_get_columns(self):
         main_db = self.get_main_database(db.session)
         df = main_db.get_df("SELECT * FROM multiformat_time_series", None)
         cdf = dataframe.SupersetDataFrame(df)
@@ -260,7 +260,7 @@ def test_get_columns_dict(self):
                  'is_dim': False},
                 {'is_date': False, 'type': 'object',
                  'name': 'string3', 'is_dim': True}]
-                , cdf.columns_dict
+                , cdf.columns
             )
         else:
             self.assertEqual(
@@ -280,7 +280,7 @@ def test_get_columns_dict(self):
                  'is_dim': False},
                 {'is_date': False, 'type': 'object',
                  'name': 'string3', 'is_dim': True}]
-                , cdf.columns_dict
+                , cdf.columns
             )
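
Not part of the patch: the snippet below is a minimal, self-contained sketch of why csv() in superset/views/core.py now passes an explicit column order to pd.DataFrame.from_records. The payload values are made up for illustration; only the shape mirrors what sql_lab.py serializes after this change ('data' as a list of records, 'columns' as a list of metadata dicts with a 'name' key, defaulting to [] rather than {} when empty).

import pandas as pd

# Hypothetical cached query payload in the post-change shape.
payload = {
    'data': [
        {'name': 'alpha', 'ds': '2018-01-01', 'cnt': 10},
        {'name': 'beta', 'ds': '2018-01-02', 'cnt': 7},
    ],
    'columns': [
        {'name': 'ds', 'type': 'object', 'is_date': True, 'is_dim': False},
        {'name': 'name', 'type': 'object', 'is_date': False, 'is_dim': True},
        {'name': 'cnt', 'type': 'int64', 'is_date': False, 'is_dim': False},
    ],
}

# Without columns=..., from_records() on a list of dicts gives no guarantee
# that the frame's column order matches the original query result. Ordering
# by the payload's column metadata keeps the exported CSV stable.
column_order = [c['name'] for c in payload['columns']]
df = pd.DataFrame.from_records(payload['data'], columns=column_order)
print(df.to_csv(index=False))
# ds,name,cnt
# 2018-01-01,alpha,10
# 2018-01-02,beta,7

Reusing the column metadata also means an empty 'data' list still yields a CSV with the correct header row, which a frame built from the records alone could not provide.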