+
- {% block tail_js %}
- {% if not standalone_mode %}
- {{ js_bundle('menu') }}
- {% endif %}
- {% if entry %}
- {{ js_bundle(entry) }}
- {% endif %}
- {% include "tail_js_custom_extra.html" %}
- {% endblock %}
+ {% block tail_js %} {% if not standalone_mode %} {{ js_bundle('menu') }} {%
+ endif %} {% if entry %} {{ js_bundle(entry) }} {% endif %} {% include
+ "tail_js_custom_extra.html" %} {% endblock %}
diff --git a/superset/temporary_cache/api.py b/superset/temporary_cache/api.py
index 0ecab44bf1748..5dc95c122ab55 100644
--- a/superset/temporary_cache/api.py
+++ b/superset/temporary_cache/api.py
@@ -24,13 +24,13 @@
from flask import request, Response
from marshmallow import ValidationError
-from superset.constants import MODEL_API_RW_METHOD_PERMISSION_MAP, RouteMethod
-from superset.key_value.types import JsonKeyValueCodec
-from superset.temporary_cache.commands.exceptions import (
+from superset.commands.temporary_cache.exceptions import (
TemporaryCacheAccessDeniedError,
TemporaryCacheResourceNotFoundError,
)
-from superset.temporary_cache.commands.parameters import CommandParameters
+from superset.commands.temporary_cache.parameters import CommandParameters
+from superset.constants import MODEL_API_RW_METHOD_PERMISSION_MAP, RouteMethod
+from superset.key_value.types import JsonKeyValueCodec
from superset.temporary_cache.schemas import (
TemporaryCachePostSchema,
TemporaryCachePutSchema,
diff --git a/superset/translations/de/LC_MESSAGES/messages.json b/superset/translations/de/LC_MESSAGES/messages.json
index 1c020467a646b..44d5d3009c3cd 100644
--- a/superset/translations/de/LC_MESSAGES/messages.json
+++ b/superset/translations/de/LC_MESSAGES/messages.json
@@ -2886,7 +2886,6 @@
"Manage email report": ["E-Mail-Bericht verwalten"],
"Manage your databases": ["Verwalten Sie Ihre Datenbanken"],
"Mandatory": ["Notwendig"],
- "Mangle Duplicate Columns": ["Doppelte Spalten zusammenführen"],
"Manually set min/max values for the y-axis.": [
"Min/Max-Werte für die y-Achse manuell festlegen."
],
diff --git a/superset/translations/de/LC_MESSAGES/messages.po b/superset/translations/de/LC_MESSAGES/messages.po
index 9234d0eaecc1b..6a1ff690194d8 100644
--- a/superset/translations/de/LC_MESSAGES/messages.po
+++ b/superset/translations/de/LC_MESSAGES/messages.po
@@ -9407,10 +9407,6 @@ msgstr "Verwalten Sie Ihre Datenbanken"
msgid "Mandatory"
msgstr "Notwendig"
-#: superset/views/database/forms.py:360
-msgid "Mangle Duplicate Columns"
-msgstr "Doppelte Spalten zusammenführen"
-
#: superset-frontend/src/explore/components/controls/TimeSeriesColumnControl/index.jsx:297
msgid "Manually set min/max values for the y-axis."
msgstr "Min/Max-Werte für die y-Achse manuell festlegen."
diff --git a/superset/translations/en/LC_MESSAGES/messages.json b/superset/translations/en/LC_MESSAGES/messages.json
index e87511b901d00..f1d035d43a865 100644
--- a/superset/translations/en/LC_MESSAGES/messages.json
+++ b/superset/translations/en/LC_MESSAGES/messages.json
@@ -2093,7 +2093,6 @@
"Manage email report": [""],
"Manage your databases": [""],
"Mandatory": [""],
- "Mangle Duplicate Columns": [""],
"Manually set min/max values for the y-axis.": [""],
"Map": [""],
"Map Style": [""],
diff --git a/superset/translations/en/LC_MESSAGES/messages.po b/superset/translations/en/LC_MESSAGES/messages.po
index 7a84f6d5de070..ee3b15bd9b37a 100644
--- a/superset/translations/en/LC_MESSAGES/messages.po
+++ b/superset/translations/en/LC_MESSAGES/messages.po
@@ -8797,10 +8797,6 @@ msgstr ""
msgid "Mandatory"
msgstr ""
-#: superset/views/database/forms.py:360
-msgid "Mangle Duplicate Columns"
-msgstr ""
-
#: superset-frontend/src/explore/components/controls/TimeSeriesColumnControl/index.jsx:297
msgid "Manually set min/max values for the y-axis."
msgstr ""
diff --git a/superset/translations/es/LC_MESSAGES/messages.json b/superset/translations/es/LC_MESSAGES/messages.json
index 57ae460a4f937..ea5636c0491e2 100644
--- a/superset/translations/es/LC_MESSAGES/messages.json
+++ b/superset/translations/es/LC_MESSAGES/messages.json
@@ -1801,7 +1801,6 @@
],
"Manage": ["Administrar"],
"Mandatory": ["Oblugatorio"],
- "Mangle Duplicate Columns": ["Manglar Columnas Duplicadas"],
"MapBox": [""],
"Mapbox": [""],
"March": ["Marzo"],
diff --git a/superset/translations/es/LC_MESSAGES/messages.po b/superset/translations/es/LC_MESSAGES/messages.po
index b629d66601a03..df4efb9b4b4bc 100644
--- a/superset/translations/es/LC_MESSAGES/messages.po
+++ b/superset/translations/es/LC_MESSAGES/messages.po
@@ -9400,10 +9400,6 @@ msgstr "Nombre de tu fuente de datos"
msgid "Mandatory"
msgstr "Oblugatorio"
-#: superset/views/database/forms.py:360
-msgid "Mangle Duplicate Columns"
-msgstr "Manglar Columnas Duplicadas"
-
#: superset-frontend/src/explore/components/controls/TimeSeriesColumnControl/index.jsx:297
#, fuzzy
msgid "Manually set min/max values for the y-axis."
diff --git a/superset/translations/fr/LC_MESSAGES/messages.json b/superset/translations/fr/LC_MESSAGES/messages.json
index 5c000012face7..2391b33db8c2e 100644
--- a/superset/translations/fr/LC_MESSAGES/messages.json
+++ b/superset/translations/fr/LC_MESSAGES/messages.json
@@ -100,7 +100,7 @@
"1H": [""],
"1M": [""],
"1T": [""],
- "2 years ago": ["il y a 2 ans"],
+ "2 years ago": ["Il y a 2 ans"],
"2/98 percentiles": [""],
"28 days ago": [""],
"2D": [""],
@@ -226,6 +226,7 @@
"Add calculated temporal columns to dataset in \"Edit datasource\" modal": [
""
],
+ "Add cross-filter": ["Ajouter un filtre"],
"Add custom scoping": [""],
"Add delivery method": ["Ajouter méthode de livraison"],
"Add filter": ["Ajouter un filtre"],
@@ -556,6 +557,8 @@
""
],
"Append": ["Ajouter"],
+ "Applied filters (%d)": ["Filtres appliqués (%d)"],
+ "Applied filters: %s": ["Filtres appliqué: %s"],
"Applied rolling window did not return any data. Please make sure the source query satisfies the minimum periods defined in the rolling window.": [
"La fenêtre glissante appliquée n'a pas retourné de données. Assurez-vous que la requête source satisfasse les périodes minimum définies dans la fenêtre glissante."
],
@@ -630,6 +633,7 @@
"Batch editing %d filters:": ["Edition Batch %d filtres:"],
"Battery level over time": [""],
"Be careful.": ["Faites attention."],
+ "Before": ["Avant"],
"Big Number": ["Gros nombre"],
"Big Number Font Size": [""],
"Big Number with Trendline": ["Gros nombre avec tendance"],
@@ -1088,9 +1092,10 @@
],
"Creator": ["Créateur"],
"Cross-filter will be applied to all of the charts that use this dataset.": [
- ""
+ "Le filtre va être appliqué à tous les graphiques qui utilise cet ensemble de données"
],
"Currently rendered: %s": [""],
+ "Custom": ["Personnalisée"],
"Custom Plugin": ["Plugin custom"],
"Custom Plugins": ["Plugins custom"],
"Custom SQL": ["SQL personnalisé"],
@@ -1259,6 +1264,7 @@
"Datetime format": ["Format Datetime"],
"Day": ["Jour"],
"Day (freq=D)": [""],
+ "Days %s": ["Jours %s"],
"Db engine did not return all queried columns": [
"La base de données n'a pas retourné toutes les colonnes demandées"
],
@@ -1415,6 +1421,7 @@
"Divider": ["Diviseur"],
"Do you want a donut or a pie?": [""],
"Documentation": ["Documentation"],
+ "Download": ["Télécharger"],
"Download as image": ["Télécharger comme image"],
"Download to CSV": ["Télécharger en CSV"],
"Draft": ["Brouillon"],
@@ -1429,6 +1436,7 @@
"Drill by": [""],
"Drill by is not available for this data point": [""],
"Drill by is not yet supported for this chart type": [""],
+ "Drill by: %s": ["Trier par %s"],
"Drill to detail": [""],
"Drill to detail by": [""],
"Drill to detail by value is not yet supported for this chart type.": [
@@ -1635,7 +1643,10 @@
"Export": ["Exporter"],
"Export dashboards?": ["Exporter les tableaux de bords ?"],
"Export query": ["Exporter la requête"],
- "Export to YAML": ["Exporter en YAML"],
+ "Export to .CSV": ["Exporter au format CSV"],
+ "Export to .JSON": ["Exporter au format JSON"],
+ "Export to Excel": ["Exporter vers Excel"],
+ "Export to YAML": ["Exporter au format YAML"],
"Export to YAML?": ["Exporter en YAML?"],
"Export to original .CSV": [""],
"Export to pivoted .CSV": [""],
@@ -1818,6 +1829,7 @@
"Host": [""],
"Hostname or IP address": ["Nom d'hôte ou adresse IP"],
"Hour": ["Heure"],
+ "Hours %s": ["Heures %s"],
"Hours offset": ["Offset des heures"],
"How do you want to enter service account credentials?": [
"Comment voulez-vous entrer les informations de connexion du compte de service ?"
@@ -1981,9 +1993,11 @@
"Labels for the marker lines": [""],
"Labels for the markers": [""],
"Labels for the ranges": [""],
+ "Last": ["Dernier"],
"Last Changed": ["Dernière modification"],
"Last Modified": ["Dernière modification"],
"Last Updated %s": ["Dernière mise à jour %s"],
+ "Last Updated %s by %s": ["Dernière mise à jour %s"],
"Last modified": ["Dernière modification"],
"Last modified by %s": ["Dernière modification par %s"],
"Last run": ["Dernière exécution"],
@@ -2065,7 +2079,6 @@
],
"Manage": ["Gestion"],
"Mandatory": ["Obligatoire"],
- "Mangle Duplicate Columns": ["Supprimer les colonnes en double"],
"Manually set min/max values for the y-axis.": [""],
"Mapbox": ["Mapbox"],
"March": ["Mars"],
@@ -2142,6 +2155,7 @@
"Minimum value on the gauge axis": [""],
"Minor Split Line": [""],
"Minute": ["Minute"],
+ "Minutes %s": ["Minutes %s"],
"Missing dataset": ["Jeu de données manquant"],
"Mixed Time-Series": [""],
"Modified": ["Modifié"],
@@ -2150,6 +2164,7 @@
"Modified columns: %s": ["Colonnes modifiées : %s"],
"Monday": ["Lundi"],
"Month": ["Mois"],
+ "Months %s": ["Mois %s"],
"Move only": [""],
"Moves the given set of dates by a specified interval.": [
"Décale l'ensemble de dates d'un intervalle spécifié."
@@ -2235,6 +2250,7 @@
"No filter": ["Pas de filtre"],
"No filter is selected.": ["Pas de filtre sélectionné."],
"No form settings were maintained": [""],
+ "No matching records found": ["Aucun enregistrement trouvé"],
"No records found": ["Aucun enregistrement trouvé"],
"No results found": ["Aucun résultat trouvé"],
"No results match your filter criteria": [""],
@@ -2265,6 +2281,7 @@
"Nothing triggered": ["Rien déclenché"],
"Notification method": ["Méthode de notification"],
"November": ["Novembre"],
+ "Now": ["Maintenant"],
"Null or Empty": ["Null ou Vide"],
"Null values": ["Valeurs NULL"],
"Number bounds used for color encoding from red to blue.\n Reverse the numbers for blue to red. To get pure red or blue,\n you can enter either only min or max.": [
@@ -2590,6 +2607,7 @@
"Python datetime string pattern": ["Python datetime string pattern"],
"QUERY DATA IN SQL LAB": [""],
"Quarter": ["Trimestre"],
+ "Quarters %s": ["Trimestres %s"],
"Query": ["Requête"],
"Query %s: %s": [""],
"Query History": ["Historiques des requêtes"],
@@ -2652,8 +2670,10 @@
"Refresh frequency": ["Fréquence de rafraichissement"],
"Refresh interval": ["Intervalle d'actualisation"],
"Refresh the default values": ["Rafraichir les valeurs par défaut"],
+ "Refreshing charts": ["Rafraîchissement en cours"],
"Regular": [""],
"Relationships between community channels": [""],
+ "Relative Date/Time": ["Date/Heure Relative"],
"Relative period": ["Période relative"],
"Relative quantity": ["Quantité relative"],
"Remind me in 24 hours": ["Me le rappeler dans 24 heures"],
@@ -2730,6 +2750,7 @@
"Resource already has an attached report.": [""],
"Restore Filter": ["Restaurer le Filtre"],
"Results": ["Résultats"],
+ "Results %s": ["Résultats"],
"Results backend is not configured.": [
"Le backend des résultats n'est pas configuré."
],
@@ -2884,6 +2905,7 @@
"Secondary y-axis Bounds": [""],
"Secondary y-axis format": [""],
"Secondary y-axis title": [""],
+ "Seconds %s": ["Secondes %s"],
"Secure Extra": ["Sécurité"],
"Secure extra": ["Sécurité"],
"Security": ["Sécurité"],
@@ -2894,7 +2916,7 @@
"Select": ["Sélectionner"],
"Select ...": ["Sélectionner..."],
"Select Delivery Method": ["Choisir la méthode de livraison"],
- "Select Viz Type": ["Selectionner un type de visualisation"],
+ "Select Viz Type": ["Sélectionner un type de visualisation"],
"Select a Columnar file to be uploaded to a database.": [
"Sélectionner un fichier en colonne à téléverser dans une base de données."
],
@@ -2904,7 +2926,7 @@
"Select a column": ["Sélectionner une colonne"],
"Select a dashboard": ["Sélectionner un tableau de bord"],
"Select a database to upload the file to": [""],
- "Select a visualization type": ["Selectionner un type de visualisation"],
+ "Select a visualization type": ["Sélectionner un type de visualisation"],
"Select aggregate options": ["Sélectionner les options d’agrégat"],
"Select any columns for metadata inspection": [""],
"Select color scheme": ["Sélectionner un schéma de couleurs"],
@@ -2912,17 +2934,17 @@
"Select databases require additional fields to be completed in the Advanced tab to successfully connect the database. Learn what requirements your databases has ": [
""
],
- "Select filter": ["Selectionner un filtre"],
+ "Select filter": ["Sélectionner un filtre"],
"Select filter plugin using AntD": [""],
"Select first filter value by default": [
- "Selectionne la première valeur du filtre par défaut"
+ "Sélectionne la première valeur du filtre par défaut"
],
"Select operator": ["Sélectionner l'opérateur"],
"Select or type a value": ["Sélectionner ou renseigner une valeur"],
"Select owners": ["Sélectionner les propriétaires"],
"Select saved metrics": ["Sélectionner les métriques sauvegardées"],
"Select start and end date": [
- "Selectionner la date de début et la date de fin"
+ "Sélectionner la date de début et la date de fin"
],
"Select subject": ["Sélectionner un objet"],
"Select the charts to which you want to apply cross-filters in this dashboard. Deselecting a chart will exclude it from being filtered when applying cross-filters from any chart on the dashboard. You can select \"All charts\" to apply cross-filters to all charts that use the same dataset or contain the same column name in the dashboard.": [
@@ -2936,7 +2958,7 @@
"Select values in highlighted field(s) in the control panel. Then run the query by clicking on the %s button.": [
""
],
- "Send as CSV": ["Envoyer comme CSV"],
+ "Send as CSV": ["Envoyer au format CSV"],
"Send as PNG": ["Envoyer comme PNG"],
"Send as text": ["Envoyer comme texte"],
"Send range filter events to other charts": [""],
@@ -3082,6 +3104,7 @@
"Sort ascending": ["Tri croissant"],
"Sort bars by x labels.": [""],
"Sort by": ["Trier par"],
+ "Sort by %s": ["Trier par %s"],
"Sort columns alphabetically": ["Trier les colonnes alphabétiquement"],
"Sort descending": ["Tri décroissant"],
"Sort filter values": ["Trier les valeurs de filtre"],
@@ -3092,6 +3115,7 @@
"Source SQL": ["SQL source"],
"Sparkline": [""],
"Spatial": ["Spatial"],
+ "Specific Date/Time": ["Date/Heure Spécifique"],
"Specify a schema (if database flavor supports this).": [
"Spécifier un schéma (si la base de données soutient cette fonctionnalités)."
],
@@ -3634,7 +3658,7 @@
"Cela peut être soit une adresse IP (ex 127.0.0.1) ou un nom de domaine (ex mydatabase.com)."
],
"This chart applies cross-filters to charts whose datasets contain columns with the same name.": [
- ""
+ "Ce graphique filtre automatiquement les graphiques ayant des colonnes de même nom dans leurs ensembles de données."
],
"This chart has been moved to a different filter scope.": [
"Ce graphique a été déplacé vers un autre champ d'application du filtre."
@@ -3740,6 +3764,9 @@
"This value should be smaller than the right target value": [
"Cette valeur devrait être plus petite que la valeur cible de droite"
],
+ "This visualization type does not support cross-filtering.": [
+ "Ce type de visualisation ne supporte pas le cross-filtering."
+ ],
"This visualization type is not supported.": [
"Ce type de visualisation n'est pas supporté."
],
@@ -3916,6 +3943,7 @@
"Unexpected error occurred, please check your logs for details": [
"Erreur inattendue, consultez les logs pour plus de détails"
],
+ "Unexpected time range: %s": ["Intervalle de temps inattendu: %s"],
"Unknown": ["Erreur inconnue"],
"Unknown MySQL server host \"%(hostname)s\".": [
"Hôte MySQL \"%(hostname)s\" inconnu."
@@ -4134,6 +4162,7 @@
"Week_ending Sunday": ["Semaine terminant le dimanche"],
"Weekly Report for %s": [""],
"Weekly seasonality": [""],
+ "Weeks %s": ["Semaines %s"],
"What should be shown on the label?": [""],
"When `Calculation type` is set to \"Percentage change\", the Y Axis Format is forced to `.1%`": [
"Lorsque `Type de calcul` vaut \"Pourcentage de changement\", le format de l'axe Y est à forcé à `.1%`"
@@ -4283,6 +4312,7 @@
"Year": ["Année"],
"Year (freq=AS)": [""],
"Yearly seasonality": [""],
+ "Years %s": ["Année %s"],
"Yes": ["Oui"],
"Yes, cancel": ["Oui, annuler"],
"Yes, overwrite changes": [""],
@@ -4304,7 +4334,9 @@
"You can add the components in the": [
"Vous pouvez ajouter les composants via le"
],
- "You can also just click on the chart to apply cross-filter.": [""],
+ "You can also just click on the chart to apply cross-filter.": [
+ "Vous pouvez juste cliquer sur le graphique pour appliquer le filtre"
+ ],
"You can choose to display all charts that you have access to or only the ones you own.\n Your filter selection will be saved and remain active until you choose to change it.": [
""
],
@@ -4314,7 +4346,9 @@
"You can preview the list of dashboards in the chart settings dropdown.": [
""
],
- "You can't apply cross-filter on this data point.": [""],
+ "You can't apply cross-filter on this data point.": [
+ "Vous ne pouvez pas ajouter de filtre sur ce point de donnée"
+ ],
"You cannot delete the last temporal filter as it's used for time range filters in dashboards.": [
""
],
@@ -4440,6 +4474,7 @@
"aggregate": ["agrégat"],
"alert": ["alerte"],
"alerts": ["alertes"],
+ "all": ["Tous"],
"also copy (duplicate) charts": [
"copier également les graphiques (dupliquer)"
],
@@ -4525,6 +4560,11 @@
"json isn't valid": ["le json n'est pas valide"],
"key a-z": [""],
"key z-a": [""],
+ "last day": ["hier"],
+ "last month": ["le mois dernier"],
+ "last quarter": ["le trimestre dernier"],
+ "last week": ["la semaine dernière"],
+ "last year": ["l'année dernière"],
"latest partition:": ["dernière partition :"],
"less than {min} {name}": [""],
"log": ["log"],
@@ -4591,18 +4631,10 @@
"y: values are normalized within each row": [""],
"year": ["année"],
"zoom area": [""],
- "No matching records found": ["Aucun résultat trouvé"],
- "Seconds %s": ["%s secondes"],
- "Minutes %s": ["%s minutes "],
"10 seconds": ["10 secondes"],
"6 hours": ["6 heures"],
"12 hours": ["12 heures"],
- "24 hours": ["24 heures"],
- "Last day": ["Hier"],
- "Last week": ["La semaine derniere"],
- "Last month": ["Le mois dernier"],
- "Last quarter": ["Le trimestre dernier"],
- "Last year": ["L'année dernière"]
+ "24 hours": ["24 heures"]
}
}
}
diff --git a/superset/translations/fr/LC_MESSAGES/messages.po b/superset/translations/fr/LC_MESSAGES/messages.po
index 6c4cdd69c4298..ab2b065ce2c1f 100644
--- a/superset/translations/fr/LC_MESSAGES/messages.po
+++ b/superset/translations/fr/LC_MESSAGES/messages.po
@@ -1204,7 +1204,6 @@ msgid "Add calculated temporal columns to dataset in \"Edit datasource\" modal"
msgstr ""
#: superset-frontend/src/components/Chart/ChartContextMenu/ChartContextMenu.tsx:197
-#, fuzzy
msgid "Add cross-filter"
msgstr "Ajouter un filtre"
@@ -2400,14 +2399,14 @@ msgid "Applied cross-filters (%d)"
msgstr "Filtres croisés appliqués (%d)"
#: superset-frontend/src/dashboard/components/FiltersBadge/DetailsPanel/index.tsx:149
-#, fuzzy, python-format
+#, python-format
msgid "Applied filters (%d)"
msgstr "Filtres appliqués (%d)"
#: superset-frontend/src/dashboard/components/nativeFilters/FilterBar/FilterControls/FilterControls.tsx:260
-#, fuzzy, python-format
+#, python-format
msgid "Applied filters: %s"
-msgstr "Filtres appliqués (%d)"
+msgstr "Filtres appliqué: %s"
#: superset/viz.py:250
msgid ""
@@ -2797,7 +2796,6 @@ msgstr "Faites attention."
#: superset-frontend/src/components/AlteredSliceTag/index.jsx:178
#: superset-frontend/src/explore/components/controls/DateFilterControl/utils/constants.ts:75
-#, fuzzy
msgid "Before"
msgstr "Avant"
@@ -4921,7 +4919,7 @@ msgstr "Action"
#: superset-frontend/src/components/Chart/ChartContextMenu/ChartContextMenu.tsx:152
msgid "Cross-filter will be applied to all of the charts that use this dataset."
-msgstr ""
+msgstr "Le filtre va être appliqué à tous les graphiques qui utilise cet ensemble de données"
#: superset-frontend/src/components/Chart/ChartContextMenu/ChartContextMenu.tsx:164
#, fuzzy
@@ -4956,7 +4954,6 @@ msgid "Currently rendered: %s"
msgstr ""
#: superset-frontend/src/explore/components/controls/DateFilterControl/utils/constants.ts:33
-#, fuzzy
msgid "Custom"
msgstr "Personnalisée"
@@ -5598,7 +5595,7 @@ msgid "Day (freq=D)"
msgstr ""
#: superset-frontend/src/explore/components/controls/DateFilterControl/utils/constants.ts:65
-#, fuzzy, python-format
+#, python-format
msgid "Days %s"
msgstr "Jours %s"
@@ -6258,9 +6255,8 @@ msgstr "Édité"
#: superset-frontend/src/dashboard/components/SliceHeaderControls/index.tsx:482
#: superset-frontend/src/explore/components/useExploreAdditionalActionsMenu/index.jsx:292
-#, fuzzy
msgid "Download"
-msgstr "télécharger en CSV"
+msgstr "Télécharger"
#: superset-frontend/src/dashboard/components/Header/HeaderActionsDropdown/index.jsx:317
#: superset-frontend/src/dashboard/components/SliceHeaderControls/index.tsx:512
@@ -6335,7 +6331,7 @@ msgid "Drill by is not yet supported for this chart type"
msgstr ""
#: superset-frontend/src/components/Chart/DrillBy/DrillByModal.tsx:420
-#, fuzzy, python-format
+#, python-format
msgid "Drill by: %s"
msgstr "Trier par %s"
@@ -7312,24 +7308,21 @@ msgstr "Exporter la requête"
#: superset-frontend/src/dashboard/components/SliceHeaderControls/index.tsx:487
#: superset-frontend/src/explore/components/useExploreAdditionalActionsMenu/index.jsx:316
-#, fuzzy
msgid "Export to .CSV"
-msgstr "Exporter en YAML"
+msgstr "Exporter au format CSV"
#: superset-frontend/src/explore/components/useExploreAdditionalActionsMenu/index.jsx:323
-#, fuzzy
msgid "Export to .JSON"
-msgstr "Exporter en YAML"
+msgstr "Exporter au format JSON"
#: superset-frontend/src/dashboard/components/SliceHeaderControls/index.tsx:506
#: superset-frontend/src/explore/components/useExploreAdditionalActionsMenu/index.jsx:335
-#, fuzzy
msgid "Export to Excel"
-msgstr "Exporter en YAML"
+msgstr "Exporter vers Excel"
#: superset/views/base.py:607
msgid "Export to YAML"
-msgstr "Exporter en YAML"
+msgstr "Exporter au format YAML"
#: superset/views/base.py:607
msgid "Export to YAML?"
@@ -8251,7 +8244,7 @@ msgid "Hour"
msgstr "Heure"
#: superset-frontend/src/explore/components/controls/DateFilterControl/utils/constants.ts:64
-#, fuzzy, python-format
+#, python-format
msgid "Hours %s"
msgstr "Heures %s"
@@ -9038,9 +9031,8 @@ msgstr "Partage de requête"
#: superset-frontend/plugins/plugin-chart-pivot-table/src/plugin/controlPanel.tsx:190
#: superset-frontend/src/explore/components/controls/DateFilterControl/utils/constants.ts:31
-#, fuzzy
msgid "Last"
-msgstr "à"
+msgstr "Dernier"
#: superset/connectors/sqla/views.py:388 superset/views/database/mixins.py:190
msgid "Last Changed"
@@ -9056,7 +9048,7 @@ msgid "Last Updated %s"
msgstr "Dernière mise à jour %s"
#: superset-frontend/src/dashboard/components/OverwriteConfirm/OverwriteConfirmModal.tsx:182
-#, fuzzy, python-format
+#, python-format
msgid "Last Updated %s by %s"
msgstr "Dernière mise à jour %s"
@@ -9574,10 +9566,6 @@ msgstr "Donner un nom à la base de données"
msgid "Mandatory"
msgstr "Obligatoire"
-#: superset/views/database/forms.py:360
-msgid "Mangle Duplicate Columns"
-msgstr "Supprimer les colonnes en double"
-
#: superset-frontend/src/explore/components/controls/TimeSeriesColumnControl/index.jsx:297
msgid "Manually set min/max values for the y-axis."
msgstr ""
@@ -10040,7 +10028,7 @@ msgid "Minute"
msgstr "Minute"
#: superset-frontend/src/explore/components/controls/DateFilterControl/utils/constants.ts:63
-#, fuzzy, python-format
+#, python-format
msgid "Minutes %s"
msgstr "Minutes %s"
@@ -10106,7 +10094,7 @@ msgid "Month"
msgstr "Mois"
#: superset-frontend/src/explore/components/controls/DateFilterControl/utils/constants.ts:67
-#, fuzzy, python-format
+#, python-format
msgid "Months %s"
msgstr "Mois %s"
@@ -10571,7 +10559,6 @@ msgid "No global filters are currently added"
msgstr "Aucun filtre ajouté"
#: superset-frontend/plugins/plugin-chart-table/src/TableChart.tsx:204
-#, fuzzy
msgid "No matching records found"
msgstr "Aucun enregistrement trouvé"
@@ -10792,7 +10779,6 @@ msgid "November"
msgstr "Novembre"
#: superset-frontend/src/explore/components/controls/DateFilterControl/utils/constants.ts:89
-#, fuzzy
msgid "Now"
msgstr "Maintenant"
@@ -12208,7 +12194,7 @@ msgid "Quarter"
msgstr "Trimestre"
#: superset-frontend/src/explore/components/controls/DateFilterControl/utils/constants.ts:68
-#, fuzzy, python-format
+#, python-format
msgid "Quarters %s"
msgstr "Trimestres %s"
@@ -12586,9 +12572,8 @@ msgid "Refresh the default values"
msgstr "Rafraichir les valeurs par défaut"
#: superset-frontend/src/dashboard/components/Header/HeaderActionsDropdown/index.jsx:163
-#, fuzzy
msgid "Refreshing charts"
-msgstr "Une erreur s'est produite durant la récupération des tableaux de bord : %s"
+msgstr "Rafraîchissement en cours"
#: superset-frontend/src/features/datasets/AddDataset/DatasetPanel/DatasetPanel.tsx:175
#, fuzzy
@@ -12636,7 +12621,6 @@ msgid "Relationships between community channels"
msgstr ""
#: superset-frontend/src/explore/components/controls/DateFilterControl/utils/constants.ts:88
-#, fuzzy
msgid "Relative Date/Time"
msgstr "Date/Heure Relative"
@@ -12931,7 +12915,7 @@ msgstr "Résultats"
#: superset-frontend/src/components/Chart/DrillBy/useResultsTableView.tsx:58
#: superset-frontend/src/explore/components/DataTablesPane/DataTablesPane.tsx:212
#: superset-frontend/src/explore/components/DataTablesPane/components/ResultsPaneOnDashboard.tsx:84
-#, fuzzy, python-format
+#, python-format
msgid "Results %s"
msgstr "Résultats"
@@ -13802,9 +13786,9 @@ msgid "Secondary y-axis title"
msgstr ""
#: superset-frontend/src/explore/components/controls/DateFilterControl/utils/constants.ts:62
-#, fuzzy, python-format
+#, python-format
msgid "Seconds %s"
-msgstr "%s secondes"
+msgstr "Secondes %s"
#: superset/views/database/mixins.py:194
msgid "Secure Extra"
@@ -13869,7 +13853,7 @@ msgstr "Choisir la méthode de livraison"
#: superset-frontend/src/explore/components/controls/VizTypeControl/FastVizSwitcher.tsx:94
msgid "Select Viz Type"
-msgstr "Selectionner un type de visualisation"
+msgstr "Sélectionner un type de visualisation"
#: superset/views/database/forms.py:425
msgid "Select a Columnar file to be uploaded to a database."
@@ -13930,7 +13914,7 @@ msgstr ""
#: superset-frontend/src/explore/components/controls/VizTypeControl/index.tsx:130
msgid "Select a visualization type"
-msgstr "Selectionner un type de visualisation"
+msgstr "Sélectionner un type de visualisation"
#: superset-frontend/src/explore/components/controls/MetricControl/AdhocMetricEditPopover/index.jsx:331
msgid "Select aggregate options"
@@ -14011,7 +13995,7 @@ msgstr "Selectionner un filtre"
#: superset-frontend/src/dashboard/components/nativeFilters/FiltersConfigModal/FiltersConfigForm/FiltersConfigForm.tsx:318
#: superset-frontend/src/filters/components/Select/index.ts:28
msgid "Select filter"
-msgstr "Selectionner un filtre"
+msgstr "Sélectionner un filtre"
#: superset-frontend/src/filters/components/Select/index.ts:29
msgid "Select filter plugin using AntD"
@@ -14019,7 +14003,7 @@ msgstr ""
#: superset-frontend/src/filters/components/Select/controlPanel.ts:104
msgid "Select first filter value by default"
-msgstr "Selectionne la première valeur du filtre par défaut"
+msgstr "Sélectionne la première valeur du filtre par défaut"
#: superset-frontend/src/explore/components/controls/FilterControl/AdhocFilterEditPopoverSimpleTabContent/index.tsx:362
msgid "Select operator"
@@ -14056,7 +14040,7 @@ msgstr "Sélectionner un schéma de couleurs"
#: superset-frontend/src/visualizations/FilterBox/FilterBox.jsx:307
msgid "Select start and end date"
-msgstr "Selectionner la date de début et la date de fin"
+msgstr "Sélectionner la date de début et la date de fin"
#: superset-frontend/src/explore/components/controls/FilterControl/AdhocFilterEditPopoverSimpleTabContent/index.tsx:334
msgid "Select subject"
@@ -14113,7 +14097,7 @@ msgstr ""
#: superset-frontend/src/features/alerts/AlertReportModal.tsx:408
msgid "Send as CSV"
-msgstr "Envoyer comme CSV"
+msgstr "Envoyer au format CSV"
#: superset-frontend/src/features/alerts/AlertReportModal.tsx:407
msgid "Send as PNG"
@@ -14831,7 +14815,7 @@ msgid "Sort by"
msgstr "Trier par"
#: superset-frontend/src/dashboard/components/SliceAdder.jsx:362
-#, fuzzy, python-format
+#, python-format
msgid "Sort by %s"
msgstr "Trier par %s"
@@ -14919,7 +14903,6 @@ msgid "Spatial"
msgstr "Spatial"
#: superset-frontend/src/explore/components/controls/DateFilterControl/utils/constants.ts:87
-#, fuzzy
msgid "Specific Date/Time"
msgstr "Date/Heure Spécifique"
@@ -16818,7 +16801,8 @@ msgstr ""
msgid ""
"This chart applies cross-filters to charts whose datasets contain columns"
" with the same name."
-msgstr ""
+msgstr "Ce graphique filtre automatiquement les graphiques ayant des colonnes de même nom dans leurs"
+" ensembles de données."
#: superset-frontend/src/dashboard/actions/dashboardLayout.js:260
msgid "This chart has been moved to a different filter scope."
@@ -17077,9 +17061,8 @@ msgid "This value should be smaller than the right target value"
msgstr "Cette valeur devrait être plus petite que la valeur cible de droite"
#: superset-frontend/src/components/Chart/ChartContextMenu/ChartContextMenu.tsx:171
-#, fuzzy
msgid "This visualization type does not support cross-filtering."
-msgstr "Ce type de visualisation n'est pas supporté."
+msgstr "Ce type de visualisation ne supporte pas le cross-filtering."
#: superset-frontend/src/explore/components/controls/VizTypeControl/index.tsx:64
msgid "This visualization type is not supported."
@@ -17931,9 +17914,9 @@ msgid "Unexpected error: "
msgstr "Erreur inattendue"
#: superset/views/api.py:108
-#, fuzzy, python-format
+#, python-format
msgid "Unexpected time range: %s"
-msgstr "Erreur inattendue"
+msgstr "Intervalle de temps inattendu: %s"
#: superset-frontend/src/features/home/ActivityTable.tsx:86
msgid "Unknown"
@@ -18701,7 +18684,7 @@ msgid "Weekly seasonality"
msgstr ""
#: superset-frontend/src/explore/components/controls/DateFilterControl/utils/constants.ts:66
-#, fuzzy, python-format
+#, python-format
msgid "Weeks %s"
msgstr "Semaines %s"
@@ -19350,7 +19333,7 @@ msgid "Yearly seasonality"
msgstr ""
#: superset-frontend/src/explore/components/controls/DateFilterControl/utils/constants.ts:69
-#, fuzzy, python-format
+#, python-format
msgid "Years %s"
msgstr "Année %s"
@@ -19452,7 +19435,7 @@ msgstr "Vous pouvez ajouter les composants via mode edition"
#: superset-frontend/src/components/Chart/ChartContextMenu/ChartContextMenu.tsx:157
msgid "You can also just click on the chart to apply cross-filter."
-msgstr ""
+msgstr "Vous pouvez juste cliquer sur le graphique pour appliquer le filtre"
#: superset-frontend/src/dashboard/components/SliceAdder.jsx:386
msgid ""
@@ -19477,7 +19460,7 @@ msgstr ""
#: superset-frontend/src/components/Chart/ChartContextMenu/ChartContextMenu.tsx:178
msgid "You can't apply cross-filter on this data point."
-msgstr ""
+msgstr "Vous ne pouvez pas ajouter de filtre sur ce point de donnée"
#: superset-frontend/src/explore/components/ControlPanelsContainer.tsx:501
msgid ""
@@ -19827,7 +19810,6 @@ msgstr "alertes"
#: superset-frontend/packages/superset-ui-chart-controls/src/shared-controls/sharedControls.tsx:160
#: superset-frontend/src/components/Chart/DrillDetail/DrillDetailMenuItems.tsx:205
#: superset-frontend/src/explore/controls.jsx:254
-#, fuzzy
msgid "all"
msgstr "Tous"
@@ -20421,27 +20403,22 @@ msgid "label"
msgstr "Label"
#: superset-frontend/src/explore/components/controls/DateFilterControl/utils/constants.ts:39
-#, fuzzy
msgid "last day"
msgstr "hier"
#: superset-frontend/src/explore/components/controls/DateFilterControl/utils/constants.ts:41
-#, fuzzy
msgid "last month"
msgstr "le mois dernier"
#: superset-frontend/src/explore/components/controls/DateFilterControl/utils/constants.ts:42
-#, fuzzy
msgid "last quarter"
msgstr "le trimestre dernier"
#: superset-frontend/src/explore/components/controls/DateFilterControl/utils/constants.ts:40
-#, fuzzy
msgid "last week"
-msgstr "la semaine derniere"
+msgstr "la semaine dernière"
#: superset-frontend/src/explore/components/controls/DateFilterControl/utils/constants.ts:43
-#, fuzzy
msgid "last year"
msgstr "l'année dernière"
diff --git a/superset/translations/it/LC_MESSAGES/messages.json b/superset/translations/it/LC_MESSAGES/messages.json
index c16648829654f..faacdd2ed08b5 100644
--- a/superset/translations/it/LC_MESSAGES/messages.json
+++ b/superset/translations/it/LC_MESSAGES/messages.json
@@ -1755,7 +1755,6 @@
],
"Manage": ["Gestisci"],
"Mandatory": [""],
- "Mangle Duplicate Columns": [""],
"Manually set min/max values for the y-axis.": [""],
"Map Style": [""],
"Mapbox": ["Mapbox"],
diff --git a/superset/translations/it/LC_MESSAGES/messages.po b/superset/translations/it/LC_MESSAGES/messages.po
index 6dc5d8c1a8d7d..816904b50ccef 100644
--- a/superset/translations/it/LC_MESSAGES/messages.po
+++ b/superset/translations/it/LC_MESSAGES/messages.po
@@ -9152,10 +9152,6 @@ msgstr "Database"
msgid "Mandatory"
msgstr ""
-#: superset/views/database/forms.py:360
-msgid "Mangle Duplicate Columns"
-msgstr ""
-
#: superset-frontend/src/explore/components/controls/TimeSeriesColumnControl/index.jsx:297
msgid "Manually set min/max values for the y-axis."
msgstr ""
diff --git a/superset/translations/ja/LC_MESSAGES/messages.json b/superset/translations/ja/LC_MESSAGES/messages.json
index 3bccbe60ea9e0..dbec51427641f 100644
--- a/superset/translations/ja/LC_MESSAGES/messages.json
+++ b/superset/translations/ja/LC_MESSAGES/messages.json
@@ -1803,7 +1803,6 @@
],
"Manage": ["管理"],
"Mandatory": [""],
- "Mangle Duplicate Columns": [""],
"Manually set min/max values for the y-axis.": [""],
"Map Style": [""],
"MapBox": [""],
diff --git a/superset/translations/ja/LC_MESSAGES/messages.po b/superset/translations/ja/LC_MESSAGES/messages.po
index 4f61d7121122b..a0522914322ce 100644
--- a/superset/translations/ja/LC_MESSAGES/messages.po
+++ b/superset/translations/ja/LC_MESSAGES/messages.po
@@ -9156,10 +9156,6 @@ msgstr "データベースのインポート"
msgid "Mandatory"
msgstr ""
-#: superset/views/database/forms.py:360
-msgid "Mangle Duplicate Columns"
-msgstr ""
-
#: superset-frontend/src/explore/components/controls/TimeSeriesColumnControl/index.jsx:297
msgid "Manually set min/max values for the y-axis."
msgstr ""
diff --git a/superset/translations/ko/LC_MESSAGES/messages.json b/superset/translations/ko/LC_MESSAGES/messages.json
index 17f4d50915cbf..58cbd4bccd979 100644
--- a/superset/translations/ko/LC_MESSAGES/messages.json
+++ b/superset/translations/ko/LC_MESSAGES/messages.json
@@ -1801,7 +1801,6 @@
],
"Manage": ["관리"],
"Mandatory": [""],
- "Mangle Duplicate Columns": [""],
"Manually set min/max values for the y-axis.": [""],
"Map Style": [""],
"MapBox": [""],
diff --git a/superset/translations/ko/LC_MESSAGES/messages.po b/superset/translations/ko/LC_MESSAGES/messages.po
index e48301a1f8cef..16cb93d2956ea 100644
--- a/superset/translations/ko/LC_MESSAGES/messages.po
+++ b/superset/translations/ko/LC_MESSAGES/messages.po
@@ -9084,10 +9084,6 @@ msgstr "데이터베이스 선택"
msgid "Mandatory"
msgstr ""
-#: superset/views/database/forms.py:360
-msgid "Mangle Duplicate Columns"
-msgstr ""
-
#: superset-frontend/src/explore/components/controls/TimeSeriesColumnControl/index.jsx:297
msgid "Manually set min/max values for the y-axis."
msgstr ""
diff --git a/superset/translations/messages.pot b/superset/translations/messages.pot
index 8c1cc701f7f70..01b684883eae8 100644
--- a/superset/translations/messages.pot
+++ b/superset/translations/messages.pot
@@ -8796,10 +8796,6 @@ msgstr ""
msgid "Mandatory"
msgstr ""
-#: superset/views/database/forms.py:360
-msgid "Mangle Duplicate Columns"
-msgstr ""
-
#: superset-frontend/src/explore/components/controls/TimeSeriesColumnControl/index.jsx:297
msgid "Manually set min/max values for the y-axis."
msgstr ""
diff --git a/superset/translations/nl/LC_MESSAGES/messages.json b/superset/translations/nl/LC_MESSAGES/messages.json
index 7148c1d3f23f6..e77f0459ee78a 100644
--- a/superset/translations/nl/LC_MESSAGES/messages.json
+++ b/superset/translations/nl/LC_MESSAGES/messages.json
@@ -2214,7 +2214,6 @@
],
"Manage": ["Beheer"],
"Mandatory": ["Verplicht"],
- "Mangle Duplicate Columns": ["Dubbele kolommen verwijderen"],
"Manually set min/max values for the y-axis.": [""],
"Map": [""],
"Map Style": [""],
diff --git a/superset/translations/nl/LC_MESSAGES/messages.po b/superset/translations/nl/LC_MESSAGES/messages.po
index b34df36498d11..6a6578d70590d 100644
--- a/superset/translations/nl/LC_MESSAGES/messages.po
+++ b/superset/translations/nl/LC_MESSAGES/messages.po
@@ -9110,10 +9110,6 @@ msgstr "Importeer databases"
msgid "Mandatory"
msgstr "Verplicht"
-#: superset/views/database/forms.py:360
-msgid "Mangle Duplicate Columns"
-msgstr "Dubbele kolommen verwijderen"
-
#: superset-frontend/src/explore/components/controls/TimeSeriesColumnControl/index.jsx:297
msgid "Manually set min/max values for the y-axis."
msgstr ""
diff --git a/superset/translations/pt/LC_MESSAGES/message.json b/superset/translations/pt/LC_MESSAGES/message.json
index 37b1e6f4ce5bc..12284400e1b77 100644
--- a/superset/translations/pt/LC_MESSAGES/message.json
+++ b/superset/translations/pt/LC_MESSAGES/message.json
@@ -876,7 +876,6 @@
"Column to use as the row labels of the dataframe. Leave empty if no index column.": [
""
],
- "Mangle Duplicate Columns": ["Coluna Datahora principal"],
"Specify duplicate columns as \"X.0, X.1\".": [""],
"Skip Initial Space": [""],
"Skip spaces after delimiter.": [""],
diff --git a/superset/translations/pt/LC_MESSAGES/messages.json b/superset/translations/pt/LC_MESSAGES/messages.json
index 0b0d099b6c4c2..22cc78fa29b05 100644
--- a/superset/translations/pt/LC_MESSAGES/messages.json
+++ b/superset/translations/pt/LC_MESSAGES/messages.json
@@ -1730,7 +1730,6 @@
"Manage email report": [""],
"Manage your databases": [""],
"Mandatory": [""],
- "Mangle Duplicate Columns": ["Coluna Datahora principal"],
"Manually set min/max values for the y-axis.": [""],
"Map Style": [""],
"Mapbox": ["Mapbox"],
diff --git a/superset/translations/pt/LC_MESSAGES/messages.po b/superset/translations/pt/LC_MESSAGES/messages.po
index 5d2d4845ca58e..623233c405c0a 100644
--- a/superset/translations/pt/LC_MESSAGES/messages.po
+++ b/superset/translations/pt/LC_MESSAGES/messages.po
@@ -9263,10 +9263,6 @@ msgstr ""
msgid "Mandatory"
msgstr ""
-#: superset/views/database/forms.py:360
-msgid "Mangle Duplicate Columns"
-msgstr "Coluna Datahora principal"
-
#: superset-frontend/src/explore/components/controls/TimeSeriesColumnControl/index.jsx:297
msgid "Manually set min/max values for the y-axis."
msgstr ""
diff --git a/superset/translations/pt_BR/LC_MESSAGES/messages.json b/superset/translations/pt_BR/LC_MESSAGES/messages.json
index 6e3a7333ba139..08c1b1f776db1 100644
--- a/superset/translations/pt_BR/LC_MESSAGES/messages.json
+++ b/superset/translations/pt_BR/LC_MESSAGES/messages.json
@@ -2724,7 +2724,6 @@
"Manage email report": ["Gerenciar relatório de e-mail"],
"Manage your databases": ["Gerenciar seus bancos de dados"],
"Mandatory": ["Obrigatório"],
- "Mangle Duplicate Columns": ["Emaranhar colunas duplicadas"],
"Manually set min/max values for the y-axis.": [
"Definir manualmente os valores mínimo/máximo para o eixo y."
],
diff --git a/superset/translations/pt_BR/LC_MESSAGES/messages.po b/superset/translations/pt_BR/LC_MESSAGES/messages.po
index 639b2d42c227e..d6d922df307cf 100644
--- a/superset/translations/pt_BR/LC_MESSAGES/messages.po
+++ b/superset/translations/pt_BR/LC_MESSAGES/messages.po
@@ -9396,10 +9396,6 @@ msgstr "Gerenciar seus bancos de dados"
msgid "Mandatory"
msgstr "Obrigatório"
-#: superset/views/database/forms.py:360
-msgid "Mangle Duplicate Columns"
-msgstr "Emaranhar colunas duplicadas"
-
#: superset-frontend/src/explore/components/controls/TimeSeriesColumnControl/index.jsx:297
msgid "Manually set min/max values for the y-axis."
msgstr "Definir manualmente os valores mínimo/máximo para o eixo y."
diff --git a/superset/translations/ru/LC_MESSAGES/messages.json b/superset/translations/ru/LC_MESSAGES/messages.json
index 616630b37a3cd..91907df1ef328 100644
--- a/superset/translations/ru/LC_MESSAGES/messages.json
+++ b/superset/translations/ru/LC_MESSAGES/messages.json
@@ -2678,7 +2678,6 @@
"Manage email report": ["Управление рассылкой по почте"],
"Manage your databases": ["Управляйте своими базами данных"],
"Mandatory": ["Обязательно"],
- "Mangle Duplicate Columns": ["Управление повторяющимися столбцами"],
"Manually set min/max values for the y-axis.": [
"Вручную задать мин./макс. значения для оси Y"
],
diff --git a/superset/translations/ru/LC_MESSAGES/messages.po b/superset/translations/ru/LC_MESSAGES/messages.po
index 8d666021c72a5..bef790f515f6f 100644
--- a/superset/translations/ru/LC_MESSAGES/messages.po
+++ b/superset/translations/ru/LC_MESSAGES/messages.po
@@ -9259,10 +9259,6 @@ msgstr "Управляйте своими базами данных"
msgid "Mandatory"
msgstr "Обязательно"
-#: superset/views/database/forms.py:360
-msgid "Mangle Duplicate Columns"
-msgstr "Управление повторяющимися столбцами"
-
#: superset-frontend/src/explore/components/controls/TimeSeriesColumnControl/index.jsx:297
msgid "Manually set min/max values for the y-axis."
msgstr "Вручную задать мин./макс. значения для оси Y"
diff --git a/superset/translations/sk/LC_MESSAGES/messages.json b/superset/translations/sk/LC_MESSAGES/messages.json
index 43be893edfd81..6a3291fe740f5 100644
--- a/superset/translations/sk/LC_MESSAGES/messages.json
+++ b/superset/translations/sk/LC_MESSAGES/messages.json
@@ -2089,7 +2089,6 @@
"Manage email report": [""],
"Manage your databases": [""],
"Mandatory": [""],
- "Mangle Duplicate Columns": [""],
"Manually set min/max values for the y-axis.": [""],
"Map": [""],
"Map Style": [""],
diff --git a/superset/translations/sk/LC_MESSAGES/messages.po b/superset/translations/sk/LC_MESSAGES/messages.po
index 9c8141b1d5697..3d2e044ccdbe9 100644
--- a/superset/translations/sk/LC_MESSAGES/messages.po
+++ b/superset/translations/sk/LC_MESSAGES/messages.po
@@ -8841,10 +8841,6 @@ msgstr ""
msgid "Mandatory"
msgstr ""
-#: superset/views/database/forms.py:360
-msgid "Mangle Duplicate Columns"
-msgstr ""
-
#: superset-frontend/src/explore/components/controls/TimeSeriesColumnControl/index.jsx:297
msgid "Manually set min/max values for the y-axis."
msgstr ""
diff --git a/superset/translations/sl/LC_MESSAGES/messages.json b/superset/translations/sl/LC_MESSAGES/messages.json
index 4d2c51fa75123..1f50e6a301704 100644
--- a/superset/translations/sl/LC_MESSAGES/messages.json
+++ b/superset/translations/sl/LC_MESSAGES/messages.json
@@ -2484,7 +2484,6 @@
"Manage email report": ["Upravljaj e-poštno poročilo"],
"Manage your databases": ["Upravljajte podatkovne baze"],
"Mandatory": ["Obvezno"],
- "Mangle Duplicate Columns": ["Odstrani podvojene stolpce"],
"Map": ["Zemljevid"],
"Map Style": ["Slog zemljevida"],
"MapBox": ["MapBox"],
diff --git a/superset/translations/sl/LC_MESSAGES/messages.po b/superset/translations/sl/LC_MESSAGES/messages.po
index f6ba479f82e11..0841a840cd4bd 100644
--- a/superset/translations/sl/LC_MESSAGES/messages.po
+++ b/superset/translations/sl/LC_MESSAGES/messages.po
@@ -9418,10 +9418,6 @@ msgstr "Upravljajte podatkovne baze"
msgid "Mandatory"
msgstr "Obvezno"
-#: superset/views/database/forms.py:360
-msgid "Mangle Duplicate Columns"
-msgstr "Odstrani podvojene stolpce"
-
#: superset-frontend/src/explore/components/controls/TimeSeriesColumnControl/index.jsx:297
#, fuzzy
msgid "Manually set min/max values for the y-axis."
diff --git a/superset/translations/zh/LC_MESSAGES/messages.json b/superset/translations/zh/LC_MESSAGES/messages.json
index 41fdda34f3bc6..e66218a493ee8 100644
--- a/superset/translations/zh/LC_MESSAGES/messages.json
+++ b/superset/translations/zh/LC_MESSAGES/messages.json
@@ -1974,7 +1974,6 @@
"Manage": ["管理"],
"Manage your databases": ["管理你的数据库"],
"Mandatory": ["必填参数"],
- "Mangle Duplicate Columns": ["混合重复列"],
"Map": ["地图"],
"Map Style": ["地图样式"],
"MapBox": ["MapBox地图"],
diff --git a/superset/translations/zh/LC_MESSAGES/messages.po b/superset/translations/zh/LC_MESSAGES/messages.po
index e05a11e2e9299..791ea2c4db7be 100644
--- a/superset/translations/zh/LC_MESSAGES/messages.po
+++ b/superset/translations/zh/LC_MESSAGES/messages.po
@@ -3723,12 +3723,12 @@ msgstr "清除"
#: superset-frontend/src/dashboard/components/nativeFilters/FilterBar/ActionButtons/index.tsx:152
msgid "Clear all"
-msgstr "清楚所有"
+msgstr "清除所有"
#: superset-frontend/src/components/Table/index.tsx:210
#, fuzzy
msgid "Clear all data"
-msgstr "清楚所有"
+msgstr "清除所有"
#: superset-frontend/src/explore/components/ControlPanelsContainer.tsx:675
#, fuzzy
@@ -9138,10 +9138,6 @@ msgstr "管理你的数据库"
msgid "Mandatory"
msgstr "必填参数"
-#: superset/views/database/forms.py:360
-msgid "Mangle Duplicate Columns"
-msgstr "混合重复列"
-
#: superset-frontend/src/explore/components/controls/TimeSeriesColumnControl/index.jsx:297
#, fuzzy
msgid "Manually set min/max values for the y-axis."
@@ -19604,7 +19600,7 @@ msgstr "无法更新您的查询"
#: superset-frontend/plugins/legacy-plugin-chart-horizon/src/controlPanel.ts:86
#, fuzzy
msgid "overall"
-msgstr "清楚所有"
+msgstr "清除所有"
#: superset-frontend/plugins/legacy-plugin-chart-paired-t-test/src/controlPanel.ts:77
msgid "p-value precision"
diff --git a/superset/utils/cache.py b/superset/utils/cache.py
index 693f3a73bcfe5..48e283e7c11cd 100644
--- a/superset/utils/cache.py
+++ b/superset/utils/cache.py
@@ -89,14 +89,7 @@ def set_and_log_cache(
logger = logging.getLogger(__name__)
-def view_cache_key(*args: Any, **kwargs: Any) -> str: # pylint: disable=unused-argument
- args_hash = hash(frozenset(request.args.items()))
- return f"view/{request.path}/{args_hash}"
-
-
-def memoized_func(
- key: str | None = None, cache: Cache = cache_manager.cache
-) -> Callable[..., Any]:
+def memoized_func(key: str, cache: Cache = cache_manager.cache) -> Callable[..., Any]:
"""
Decorator with configurable key and cache backend.
@@ -129,14 +122,11 @@ def wrapped_f(*args: Any, **kwargs: Any) -> Any:
if not kwargs.get("cache", True):
return f(*args, **kwargs)
- if key:
- # format the key using args/kwargs passed to the decorated function
- signature = inspect.signature(f)
- bound_args = signature.bind(*args, **kwargs)
- bound_args.apply_defaults()
- cache_key = key.format(**bound_args.arguments)
- else:
- cache_key = view_cache_key(*args, **kwargs)
+ # format the key using args/kwargs passed to the decorated function
+ signature = inspect.signature(f)
+ bound_args = signature.bind(*args, **kwargs)
+ bound_args.apply_defaults()
+ cache_key = key.format(**bound_args.arguments)
obj = cache.get(cache_key)
if not kwargs.get("force") and obj is not None:
@@ -153,7 +143,7 @@ def wrapped_f(*args: Any, **kwargs: Any) -> Any:
def etag_cache(
cache: Cache = cache_manager.cache,
get_last_modified: Callable[..., datetime] | None = None,
- max_age: int | float | None = None,
+ max_age: int | float = app.config["CACHE_DEFAULT_TIMEOUT"],
raise_for_access: Callable[..., Any] | None = None,
skip: Callable[..., bool] | None = None,
) -> Callable[..., Any]:
@@ -169,8 +159,6 @@ def etag_cache(
dataframe cache for requests that produce the same SQL.
"""
- if max_age is None:
- max_age = app.config["CACHE_DEFAULT_TIMEOUT"]
def decorator(f: Callable[..., Any]) -> Callable[..., Any]:
@wraps(f)
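With `view_cache_key()` gone, `memoized_func` no longer has a fallback: every caller must supply a key template, which is formatted from the decorated function's bound arguments. A minimal sketch of that binding step, with hypothetical function and key names:

```python
import inspect

def render_cache_key(key: str, func, *args, **kwargs) -> str:
    # Mirror the decorator body above: bind the call's args to the
    # function signature, apply defaults, then format the template.
    bound = inspect.signature(func).bind(*args, **kwargs)
    bound.apply_defaults()
    return key.format(**bound.arguments)

def latest_partition(table_name: str, schema: str = "public") -> None:
    ...

print(render_cache_key("partition:{schema}.{table_name}", latest_partition, "logs"))
# partition:public.logs
```

Note the companion change: `etag_cache`'s `max_age` default is now read from `app.config["CACHE_DEFAULT_TIMEOUT"]` at import time rather than resolved lazily inside the decorator.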
diff --git a/superset/utils/core.py b/superset/utils/core.py
index 7ec36981cc5eb..b9c24076a4e12 100644
--- a/superset/utils/core.py
+++ b/superset/utils/core.py
@@ -60,7 +60,7 @@
import sqlalchemy as sa
from cryptography.hazmat.backends import default_backend
from cryptography.x509 import Certificate, load_pem_x509_certificate
-from flask import current_app, flash, g, Markup, request
+from flask import current_app, g, Markup, request
from flask_appbuilder import SQLA
from flask_appbuilder.security.sqla.models import User
from flask_babel import gettext as __
@@ -72,7 +72,7 @@
from sqlalchemy.engine import Connection, Engine
from sqlalchemy.engine.reflection import Inspector
from sqlalchemy.sql.type_api import Variant
-from sqlalchemy.types import TEXT, TypeDecorator, TypeEngine
+from sqlalchemy.types import TypeEngine
from typing_extensions import TypeGuard
from superset.constants import (
@@ -105,7 +105,7 @@
from superset.utils.hashing import md5_sha_from_dict, md5_sha_from_str
if TYPE_CHECKING:
- from superset.connectors.base.models import BaseColumn, BaseDatasource
+ from superset.connectors.sqla.models import BaseDatasource, TableColumn
from superset.models.sql_lab import Query
logging.getLogger("MARKDOWN").setLevel(logging.INFO)
@@ -122,18 +122,6 @@
ADHOC_FILTERS_REGEX = re.compile("^adhoc_filters")
-class LenientEnum(Enum):
- """Enums with a `get` method that convert a enum value to `Enum` if it is a
- valid value."""
-
- @classmethod
- def get(cls, value: Any) -> Any:
- try:
- return super().__new__(cls, value)
- except ValueError:
- return None
-
-
class AdhocMetricExpressionType(StrEnum):
SIMPLE = "SIMPLE"
SQL = "SQL"
@@ -280,15 +268,6 @@ class PostProcessingContributionOrientation(StrEnum):
COLUMN = "column"
-class QueryMode(str, LenientEnum):
- """
- Whether the query runs on aggregate or returns raw records
- """
-
- RAW = "raw"
- AGGREGATE = "aggregate"
-
-
class QuerySource(Enum):
"""
The source of a SQL query.
@@ -354,17 +333,6 @@ class ColumnSpec(NamedTuple):
python_date_format: str | None = None
-def flasher(msg: str, severity: str = "message") -> None:
- """Flask's flash if available, logging call if not"""
- try:
- flash(msg, severity)
- except RuntimeError:
- if severity == "danger":
- logger.error(msg, exc_info=True)
- else:
- logger.info(msg)
-
-
def parse_js_uri_path_item(
item: str | None, unquote: bool = True, eval_undefined: bool = False
) -> str | None:
@@ -448,15 +416,6 @@ def cast_to_boolean(value: Any) -> bool | None:
return False
-def list_minus(l: list[Any], minus: list[Any]) -> list[Any]:
- """Returns l without what is in minus
-
- >>> list_minus([1, 2, 3], [2])
- [1, 3]
- """
- return [o for o in l if o not in minus]
-
-
class DashboardEncoder(json.JSONEncoder):
def __init__(self, *args: Any, **kwargs: Any) -> None:
super().__init__(*args, **kwargs)
@@ -474,22 +433,6 @@ def default(self, o: Any) -> dict[Any, Any] | str:
return json.JSONEncoder(sort_keys=True).default(o)
-class JSONEncodedDict(TypeDecorator): # pylint: disable=abstract-method
- """Represents an immutable structure as a json-encoded string."""
-
- impl = TEXT
-
- def process_bind_param(
- self, value: dict[Any, Any] | None, dialect: str
- ) -> str | None:
- return json.dumps(value) if value is not None else None
-
- def process_result_value(
- self, value: str | None, dialect: str
- ) -> dict[Any, Any] | None:
- return json.loads(value) if value is not None else None
-
-
def format_timedelta(time_delta: timedelta) -> str:
"""
Ensures negative time deltas are easily interpreted by humans
@@ -995,13 +938,6 @@ def get_email_address_list(address_string: str) -> list[str]:
return [x.strip() for x in address_string_list if x.strip()]
-def get_email_address_str(address_string: str) -> str:
- address_list = get_email_address_list(address_string)
- address_list_str = ", ".join(address_list)
-
- return address_list_str
-
-
def choicify(values: Iterable[Any]) -> list[tuple[Any, Any]]:
"""Takes an iterable and makes an iterable of tuples with it"""
return [(v, v) for v in values]
@@ -1692,7 +1628,7 @@ def extract_dataframe_dtypes(
return generic_types
-def extract_column_dtype(col: BaseColumn) -> GenericDataType:
+def extract_column_dtype(col: TableColumn) -> GenericDataType:
if col.is_temporal:
return GenericDataType.TEMPORAL
if col.is_numeric:
@@ -1701,15 +1637,6 @@ def extract_column_dtype(col: BaseColumn) -> GenericDataType:
return GenericDataType.STRING
-def indexed(items: list[Any], key: str | Callable[[Any], Any]) -> dict[Any, list[Any]]:
- """Build an index for a list of objects"""
- idx: dict[Any, Any] = {}
- for item in items:
- key_ = getattr(item, key) if isinstance(key, str) else key(item)
- idx.setdefault(key_, []).append(item)
- return idx
-
-
def is_test() -> bool:
return parse_boolean_string(os.environ.get("SUPERSET_TESTENV", "false"))
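Among the dead utilities deleted from `core.py`, `LenientEnum` (and `QueryMode` built on it) offered a `get()` that returned `None` for unknown values instead of raising. Any external code that relied on it can recover the same behaviour with a plain `Enum`; a sketch:

```python
from enum import Enum

class QueryMode(str, Enum):
    RAW = "raw"
    AGGREGATE = "aggregate"

def get_query_mode(value: str) -> QueryMode | None:
    # Equivalent of the removed LenientEnum.get(): None instead of ValueError.
    try:
        return QueryMode(value)
    except ValueError:
        return None

assert get_query_mode("raw") is QueryMode.RAW
assert get_query_mode("nope") is None
```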
diff --git a/superset/utils/date_parser.py b/superset/utils/date_parser.py
index 438e379a96cf0..2d49424a82c65 100644
--- a/superset/utils/date_parser.py
+++ b/superset/utils/date_parser.py
@@ -41,7 +41,7 @@
Suppress,
)
-from superset.charts.commands.exceptions import (
+from superset.commands.chart.exceptions import (
TimeDeltaAmbiguousError,
TimeRangeAmbiguousError,
TimeRangeParseFailError,
diff --git a/superset/utils/pandas_postprocessing/cum.py b/superset/utils/pandas_postprocessing/cum.py
index 128fa970f5f79..d3eb969f79fc4 100644
--- a/superset/utils/pandas_postprocessing/cum.py
+++ b/superset/utils/pandas_postprocessing/cum.py
@@ -46,6 +46,7 @@ def cum(
"""
columns = columns or {}
df_cum = df.loc[:, columns.keys()]
+ df_cum = df_cum.fillna(0)
operation = "cum" + operator
if operation not in ALLOWLIST_CUMULATIVE_FUNCTIONS or not hasattr(
df_cum, operation
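The added `fillna(0)` matters because pandas cumulative operators skip NaN but leave NaN at the missing positions, so charts built on the result show holes; zero-filling first keeps the cumulative series continuous. A toy illustration:

```python
import pandas as pd

s = pd.Series([1.0, None, 3.0])

print(s.cumsum().tolist())            # [1.0, nan, 4.0]  <- NaN hole remains
print(s.fillna(0).cumsum().tolist())  # [1.0, 1.0, 4.0]  <- patched behaviour
```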
diff --git a/superset/utils/retries.py b/superset/utils/retries.py
index 8a1e6b95eadcb..3af821362de93 100644
--- a/superset/utils/retries.py
+++ b/superset/utils/retries.py
@@ -15,6 +15,7 @@
# specific language governing permissions and limitations
# under the License.
+import logging
from collections.abc import Generator
from typing import Any, Callable, Optional
@@ -26,6 +27,7 @@ def retry_call(
*args: Any,
strategy: Callable[..., Generator[int, None, None]] = backoff.constant,
exception: type[Exception] = Exception,
+ giveup_log_level: int = logging.WARNING,
fargs: Optional[list[Any]] = None,
fkwargs: Optional[dict[str, Any]] = None,
**kwargs: Any
@@ -33,6 +35,7 @@ def retry_call(
"""
Retry a given call.
"""
+ kwargs["giveup_log_level"] = giveup_log_level
decorated = backoff.on_exception(strategy, exception, *args, **kwargs)(func)
fargs = fargs or []
fkwargs = fkwargs or {}
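`giveup_log_level` is forwarded straight into `backoff.on_exception` (the parameter exists in backoff >= 2.0), so callers can now control how loudly an exhausted retry is logged; `retry_call` itself defaults it to WARNING instead of the library's ERROR. A hedged usage sketch with a made-up callable:

```python
import logging

import backoff

from superset.utils.retries import retry_call

def flaky_fetch() -> str:
    raise ConnectionError("transient failure")

try:
    # Constant backoff, three attempts; the give-up event is logged at
    # ERROR here, overriding retry_call's new WARNING default.
    retry_call(
        flaky_fetch,
        strategy=backoff.constant,
        exception=ConnectionError,
        giveup_log_level=logging.ERROR,
        max_tries=3,
    )
except ConnectionError:
    pass  # retries exhausted; backoff has already logged the give-up
```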
diff --git a/superset/utils/screenshots.py b/superset/utils/screenshots.py
index 8609d65038273..bf6ed0f9e8493 100644
--- a/superset/utils/screenshots.py
+++ b/superset/utils/screenshots.py
@@ -201,7 +201,7 @@ def resize_image(
logger.debug("Cropping to: %s*%s", str(img.size[0]), str(desired_width))
img = img.crop((0, 0, img.size[0], desired_width))
logger.debug("Resizing to %s", str(thumb_size))
- img = img.resize(thumb_size, Image.ANTIALIAS)
+ img = img.resize(thumb_size, Image.Resampling.LANCZOS)
new_img = BytesIO()
if output != "png":
img = img.convert("RGB")
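Context for the one-line `resize` change: Pillow 9.1 moved the filters into the `Image.Resampling` enum and Pillow 10 deleted the `Image.ANTIALIAS` alias, so the old spelling now raises `AttributeError`; the filter itself is still Lanczos. A minimal check:

```python
from PIL import Image

img = Image.new("RGB", (800, 600), "white")
# Image.ANTIALIAS raises AttributeError on Pillow >= 10; this is the
# current name for the same Lanczos filter:
thumb = img.resize((400, 300), Image.Resampling.LANCZOS)
assert thumb.size == (400, 300)
```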
diff --git a/superset/utils/url_map_converters.py b/superset/utils/url_map_converters.py
index 11e40267b30c5..ed1004022793f 100644
--- a/superset/utils/url_map_converters.py
+++ b/superset/utils/url_map_converters.py
@@ -18,7 +18,7 @@
from werkzeug.routing import BaseConverter, Map
-from superset.tags.models import ObjectTypes
+from superset.tags.models import ObjectType
class RegexConverter(BaseConverter):
@@ -31,7 +31,7 @@ class ObjectTypeConverter(BaseConverter):
"""Validate that object_type is indeed an object type."""
def to_python(self, value: str) -> Any:
- return ObjectTypes[value]
+ return ObjectType[value]
def to_url(self, value: Any) -> str:
return value.name
diff --git a/superset/utils/webdriver.py b/superset/utils/webdriver.py
index 4353319072287..4552600fc9f1c 100644
--- a/superset/utils/webdriver.py
+++ b/superset/utils/webdriver.py
@@ -48,8 +48,8 @@
if feature_flag_manager.is_feature_enabled("PLAYWRIGHT_REPORTS_AND_THUMBNAILS"):
from playwright.sync_api import (
BrowserContext,
- ElementHandle,
Error as PlaywrightError,
+ Locator,
Page,
sync_playwright,
TimeoutError as PlaywrightTimeout,
@@ -105,14 +105,7 @@ def find_unexpected_errors(page: Page) -> list[str]:
alert_div.get_by_role("button").click()
# wait for modal to show up
- page.wait_for_selector(
- ".ant-modal-content",
- timeout=current_app.config[
- "SCREENSHOT_WAIT_FOR_ERROR_MODAL_VISIBLE"
- ]
- * 1000,
- state="visible",
- )
+ page.locator(".ant-modal-content").wait_for(state="visible")
err_msg_div = page.locator(".ant-modal-content .ant-modal-body")
#
# # collect error message
@@ -125,14 +118,7 @@ def find_unexpected_errors(page: Page) -> list[str]:
page.locator(".ant-modal-content .ant-modal-close").click()
#
# # wait until the modal becomes invisible
- page.wait_for_selector(
- ".ant-modal-content",
- timeout=current_app.config[
- "SCREENSHOT_WAIT_FOR_ERROR_MODAL_INVISIBLE"
- ]
- * 1000,
- state="detached",
- )
+ page.locator(".ant-modal-content").wait_for(state="detached")
try:
# Even if some errors can't be updated in the screenshot,
# keep all the errors in the server log and do not fail the loop
@@ -147,9 +133,12 @@ def find_unexpected_errors(page: Page) -> list[str]:
return error_messages
- def get_screenshot(self, url: str, element_name: str, user: User) -> bytes | None:
+ def get_screenshot( # pylint: disable=too-many-locals, too-many-statements
+ self, url: str, element_name: str, user: User
+ ) -> bytes | None:
with sync_playwright() as playwright:
- browser = playwright.chromium.launch()
+ browser_args = current_app.config["WEBDRIVER_OPTION_ARGS"]
+ browser = playwright.chromium.launch(args=browser_args)
pixel_density = current_app.config["WEBDRIVER_WINDOW"].get(
"pixel_density", 1
)
@@ -166,24 +155,31 @@ def get_screenshot(self, url: str, element_name: str, user: User) -> bytes | Non
)
self.auth(user, context)
page = context.new_page()
- page.goto(
- url, wait_until=current_app.config["SCREENSHOT_PLAYWRIGHT_WAIT_EVENT"]
- )
+ try:
+ page.goto(
+ url,
+ wait_until=current_app.config["SCREENSHOT_PLAYWRIGHT_WAIT_EVENT"],
+ )
+ except PlaywrightTimeout:
+ logger.exception(
+ "Web event %s not detected. Page %s might not have been fully loaded",
+ current_app.config["SCREENSHOT_PLAYWRIGHT_WAIT_EVENT"],
+ url,
+ )
+
img: bytes | None = None
selenium_headstart = current_app.config["SCREENSHOT_SELENIUM_HEADSTART"]
logger.debug("Sleeping for %i seconds", selenium_headstart)
page.wait_for_timeout(selenium_headstart * 1000)
- element: ElementHandle
+ element: Locator
try:
try:
# page didn't load
logger.debug(
"Wait for the presence of %s at url: %s", element_name, url
)
- element = page.wait_for_selector(
- f".{element_name}",
- timeout=self._screenshot_locate_wait * 1000,
- )
+ element = page.locator(f".{element_name}")
+ element.wait_for()
except PlaywrightTimeout as ex:
logger.exception("Timed out requesting url %s", url)
raise ex
@@ -191,9 +187,10 @@ def get_screenshot(self, url: str, element_name: str, user: User) -> bytes | Non
try:
# chart containers didn't render
logger.debug("Wait for chart containers to draw at url: %s", url)
- page.wait_for_selector(
- ".slice_container", timeout=self._screenshot_locate_wait * 1000
- )
+ slice_container_locator = page.locator(".slice_container")
+ slice_container_locator.first.wait_for()
+ for slice_container_elem in slice_container_locator.all():
+ slice_container_elem.wait_for()
except PlaywrightTimeout as ex:
logger.exception(
"Timed out waiting for chart containers to draw at url %s",
@@ -205,11 +202,8 @@ def get_screenshot(self, url: str, element_name: str, user: User) -> bytes | Non
logger.debug(
"Wait for loading element of charts to be gone at url: %s", url
)
- page.wait_for_selector(
- ".loading",
- timeout=self._screenshot_load_wait * 1000,
- state="detached",
- )
+ for loading_element in page.locator(".loading").all():
+ loading_element.wait_for(state="detached")
except PlaywrightTimeout as ex:
logger.exception(
"Timed out waiting for charts to load at url %s", url
diff --git a/superset/views/api.py b/superset/views/api.py
index 312efb947e4aa..eeedd7c641303 100644
--- a/superset/views/api.py
+++ b/superset/views/api.py
@@ -26,7 +26,7 @@
from flask_babel import lazy_gettext as _
from superset import db, event_logger
-from superset.charts.commands.exceptions import (
+from superset.commands.chart.exceptions import (
TimeRangeAmbiguousError,
TimeRangeParseFailError,
)
diff --git a/superset/views/base.py b/superset/views/base.py
index 4015b7a028aa6..9149c7ad919ac 100644
--- a/superset/views/base.py
+++ b/superset/views/base.py
@@ -14,6 +14,8 @@
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
+from __future__ import annotations
+
import dataclasses
import functools
import logging
@@ -21,7 +23,7 @@
import traceback
from datetime import datetime
from importlib.resources import files
-from typing import Any, Callable, cast, Optional, Union
+from typing import Any, Callable, cast
import simplejson as json
import yaml
@@ -120,6 +122,7 @@
"ALERT_REPORTS_DEFAULT_WORKING_TIMEOUT",
"NATIVE_FILTER_DEFAULT_ROW_LIMIT",
"PREVENT_UNSAFE_DEFAULT_URLS_ON_DATASET",
+ "JWT_ACCESS_CSRF_COOKIE_NAME",
)
logger = logging.getLogger(__name__)
@@ -139,15 +142,11 @@ def get_error_msg() -> str:
def json_error_response(
- msg: Optional[str] = None,
+ msg: str | None = None,
status: int = 500,
- payload: Optional[dict[str, Any]] = None,
- link: Optional[str] = None,
+ payload: dict[str, Any] | None = None,
) -> FlaskResponse:
- if not payload:
- payload = {"error": f"{msg}"}
- if link:
- payload["link"] = link
+ payload = payload or {"error": f"{msg}"}
return Response(
json.dumps(payload, default=utils.json_iso_dttm_ser, ignore_nan=True),
@@ -159,10 +158,9 @@ def json_error_response(
def json_errors_response(
errors: list[SupersetError],
status: int = 500,
- payload: Optional[dict[str, Any]] = None,
+ payload: dict[str, Any] | None = None,
) -> FlaskResponse:
- if not payload:
- payload = {}
+ payload = payload or {}
payload["errors"] = [dataclasses.asdict(error) for error in errors]
return Response(
@@ -182,7 +180,7 @@ def data_payload_response(payload_json: str, has_error: bool = False) -> FlaskRe
def generate_download_headers(
- extension: str, filename: Optional[str] = None
+ extension: str, filename: str | None = None
) -> dict[str, Any]:
filename = filename if filename else datetime.now().strftime("%Y%m%d_%H%M%S")
content_disp = f"attachment; filename={filename}.{extension}"
@@ -192,7 +190,7 @@ def generate_download_headers(
def deprecated(
eol_version: str = "4.0.0",
- new_target: Optional[str] = None,
+ new_target: str | None = None,
) -> Callable[[Callable[..., FlaskResponse]], Callable[..., FlaskResponse]]:
"""
A decorator to mark an API endpoint of a SupersetView as deprecated.
@@ -200,7 +198,7 @@ def deprecated(
"""
def _deprecated(f: Callable[..., FlaskResponse]) -> Callable[..., FlaskResponse]:
- def wraps(self: "BaseSupersetView", *args: Any, **kwargs: Any) -> FlaskResponse:
+ def wraps(self: BaseSupersetView, *args: Any, **kwargs: Any) -> FlaskResponse:
message = (
"%s.%s "
"This API endpoint is deprecated and will be removed in version %s"
@@ -227,7 +225,7 @@ def api(f: Callable[..., FlaskResponse]) -> Callable[..., FlaskResponse]:
return the response in the JSON format
"""
- def wraps(self: "BaseSupersetView", *args: Any, **kwargs: Any) -> FlaskResponse:
+ def wraps(self: BaseSupersetView, *args: Any, **kwargs: Any) -> FlaskResponse:
try:
return f(self, *args, **kwargs)
except NoAuthorizationError:
@@ -249,7 +247,7 @@ def handle_api_exception(
exceptions.
"""
- def wraps(self: "BaseSupersetView", *args: Any, **kwargs: Any) -> FlaskResponse:
+ def wraps(self: BaseSupersetView, *args: Any, **kwargs: Any) -> FlaskResponse:
try:
return f(self, *args, **kwargs)
except SupersetSecurityException as ex:
@@ -294,11 +292,11 @@ def json_response(obj: Any, status: int = 200) -> FlaskResponse:
)
def render_app_template(
- self, extra_bootstrap_data: Optional[dict[str, Any]] = None
+ self, extra_bootstrap_data: dict[str, Any] | None = None
) -> FlaskResponse:
payload = {
"user": bootstrap_user_data(g.user, include_perms=True),
- "common": common_bootstrap_payload(g.user),
+ "common": common_bootstrap_payload(),
**(extra_bootstrap_data or {}),
}
return self.render_template(
@@ -335,21 +333,16 @@ def get_environment_tag() -> dict[str, Any]:
def menu_data(user: User) -> dict[str, Any]:
- menu = appbuilder.menu.get_data()
+ languages = {
+ lang: {**appbuilder.languages[lang], "url": appbuilder.get_url_for_locale(lang)}
+ for lang in appbuilder.languages
+ }
- languages = {}
- for lang in appbuilder.languages:
- languages[lang] = {
- **appbuilder.languages[lang],
- "url": appbuilder.get_url_for_locale(lang),
- }
- brand_text = appbuilder.app.config["LOGO_RIGHT_TEXT"]
- if callable(brand_text):
+ if callable(brand_text := appbuilder.app.config["LOGO_RIGHT_TEXT"]):
brand_text = brand_text()
- build_number = appbuilder.app.config["BUILD_NUMBER"]
return {
- "menu": menu,
+ "menu": appbuilder.menu.get_data(),
"brand": {
"path": appbuilder.app.config["LOGO_TARGET_PATH"] or "/superset/welcome/",
"icon": appbuilder.app_icon,
@@ -369,9 +362,9 @@ def menu_data(user: User) -> dict[str, Any]:
"documentation_text": appbuilder.app.config["DOCUMENTATION_TEXT"],
"version_string": appbuilder.app.config["VERSION_STRING"],
"version_sha": appbuilder.app.config["VERSION_SHA"],
- "build_number": build_number,
+ "build_number": appbuilder.app.config["BUILD_NUMBER"],
"languages": languages,
- "show_language_picker": len(languages.keys()) > 1,
+ "show_language_picker": len(languages) > 1,
"user_is_anonymous": user.is_anonymous,
"user_info_url": None
if is_feature_enabled("MENU_HIDE_USER_INFO")
@@ -387,7 +380,9 @@ def menu_data(user: User) -> dict[str, Any]:
@cache_manager.cache.memoize(timeout=60)
-def cached_common_bootstrap_data(user: User, locale: str) -> dict[str, Any]:
+def cached_common_bootstrap_data( # pylint: disable=unused-argument
+ user_id: int | None, locale: str
+) -> dict[str, Any]:
"""Common data always sent to the client
The function is memoized as the return value only changes when user permissions
@@ -424,15 +419,15 @@ def cached_common_bootstrap_data(user: User, locale: str) -> dict[str, Any]:
"extra_sequential_color_schemes": conf["EXTRA_SEQUENTIAL_COLOR_SCHEMES"],
"extra_categorical_color_schemes": conf["EXTRA_CATEGORICAL_COLOR_SCHEMES"],
"theme_overrides": conf["THEME_OVERRIDES"],
- "menu_data": menu_data(user),
+ "menu_data": menu_data(g.user),
}
bootstrap_data.update(conf["COMMON_BOOTSTRAP_OVERRIDES_FUNC"](bootstrap_data))
return bootstrap_data
-def common_bootstrap_payload(user: User) -> dict[str, Any]:
+def common_bootstrap_payload() -> dict[str, Any]:
return {
- **cached_common_bootstrap_data(user, get_locale()),
+ **cached_common_bootstrap_data(utils.get_user_id(), get_locale()),
"flash_messages": get_flashed_messages(with_categories=True),
}
@@ -542,7 +537,7 @@ def show_unexpected_exception(ex: Exception) -> FlaskResponse:
def get_common_bootstrap_data() -> dict[str, Any]:
def serialize_bootstrap_data() -> str:
return json.dumps(
- {"common": common_bootstrap_payload(g.user)},
+ {"common": common_bootstrap_payload()},
default=utils.pessimistic_json_iso_dttm_ser,
)
@@ -560,7 +555,7 @@ class SupersetModelView(ModelView):
def render_app_template(self) -> FlaskResponse:
payload = {
"user": bootstrap_user_data(g.user, include_perms=True),
- "common": common_bootstrap_payload(g.user),
+ "common": common_bootstrap_payload(),
}
return self.render_template(
"superset/spa.html",
@@ -595,11 +590,11 @@ class YamlExportMixin: # pylint: disable=too-few-public-methods
Used on DatabaseView for cli compatibility
"""
- yaml_dict_key: Optional[str] = None
+ yaml_dict_key: str | None = None
@action("yaml_export", __("Export to YAML"), __("Export to YAML?"), "fa-download")
def yaml_export(
- self, items: Union[ImportExportMixin, list[ImportExportMixin]]
+ self, items: ImportExportMixin | list[ImportExportMixin]
) -> FlaskResponse:
if not isinstance(items, list):
items = [items]
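The signature change from `user: User` to `user_id: int | None` on the memoized function is deliberate: Flask-Caching's `memoize` derives its cache key from the function arguments, so a small, stable scalar keys the cache correctly, whereas a mutable ORM object makes keys fragile. A minimal sketch, assuming `SimpleCache` and placeholder payload contents:

```python
from flask import Flask
from flask_caching import Cache

app = Flask(__name__)
cache = Cache(app, config={"CACHE_TYPE": "SimpleCache"})


@cache.memoize(timeout=60)
def cached_payload(user_id: int | None, locale: str) -> dict:
    # Keyed by (user_id, locale); recomputed at most once a minute
    # per combination.
    return {"user_id": user_id, "locale": locale}


with app.app_context():
    cached_payload(1, "en")  # computed
    cached_payload(1, "en")  # served from cache
```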
diff --git a/superset/views/core.py b/superset/views/core.py
index 2f9b99eba0e61..9ad2f63fdc680 100755
--- a/superset/views/core.py
+++ b/superset/views/core.py
@@ -44,27 +44,26 @@
security_manager,
)
from superset.async_events.async_query_manager import AsyncQueryTokenException
-from superset.charts.commands.exceptions import ChartNotFoundError
-from superset.charts.commands.warm_up_cache import ChartWarmUpCacheCommand
+from superset.commands.chart.exceptions import ChartNotFoundError
+from superset.commands.chart.warm_up_cache import ChartWarmUpCacheCommand
+from superset.commands.dashboard.importers.v0 import ImportDashboardsCommand
+from superset.commands.dashboard.permalink.get import GetDashboardPermalinkCommand
+from superset.commands.dataset.exceptions import DatasetNotFoundError
+from superset.commands.explore.form_data.create import CreateFormDataCommand
+from superset.commands.explore.form_data.get import GetFormDataCommand
+from superset.commands.explore.form_data.parameters import CommandParameters
+from superset.commands.explore.permalink.get import GetExplorePermalinkCommand
from superset.common.chart_data import ChartDataResultFormat, ChartDataResultType
-from superset.connectors.base.models import BaseDatasource
-from superset.connectors.sqla.models import SqlaTable
+from superset.connectors.sqla.models import BaseDatasource, SqlaTable
from superset.daos.chart import ChartDAO
from superset.daos.datasource import DatasourceDAO
-from superset.dashboards.commands.importers.v0 import ImportDashboardsCommand
-from superset.dashboards.permalink.commands.get import GetDashboardPermalinkCommand
from superset.dashboards.permalink.exceptions import DashboardPermalinkGetFailedError
-from superset.datasets.commands.exceptions import DatasetNotFoundError
from superset.exceptions import (
CacheLoadError,
DatabaseNotFound,
SupersetException,
SupersetSecurityException,
)
-from superset.explore.form_data.commands.create import CreateFormDataCommand
-from superset.explore.form_data.commands.get import GetFormDataCommand
-from superset.explore.form_data.commands.parameters import CommandParameters
-from superset.explore.permalink.commands.get import GetExplorePermalinkCommand
from superset.explore.permalink.exceptions import ExplorePermalinkGetFailedError
from superset.extensions import async_query_manager, cache_manager
from superset.models.core import Database
@@ -605,7 +604,7 @@ def explore(
"force": force,
"user": bootstrap_user_data(g.user, include_perms=True),
"forced_height": request.args.get("height"),
- "common": common_bootstrap_payload(g.user),
+ "common": common_bootstrap_payload(),
}
if slc:
title = slc.slice_name
@@ -863,7 +862,7 @@ def dashboard(
bootstrap_data=json.dumps(
{
"user": bootstrap_user_data(g.user, include_perms=True),
- "common": common_bootstrap_payload(g.user),
+ "common": common_bootstrap_payload(),
},
default=utils.pessimistic_json_iso_dttm_ser,
),
@@ -954,7 +953,7 @@ def welcome(self) -> FlaskResponse:
payload = {
"user": bootstrap_user_data(g.user, include_perms=True),
- "common": common_bootstrap_payload(g.user),
+ "common": common_bootstrap_payload(),
}
return self.render_template(
diff --git a/superset/views/dashboard/views.py b/superset/views/dashboard/views.py
index ce5e8f1e07507..0b41a67ee292f 100644
--- a/superset/views/dashboard/views.py
+++ b/superset/views/dashboard/views.py
@@ -151,7 +151,7 @@ def embedded(
)
bootstrap_data = {
- "common": common_bootstrap_payload(g.user),
+ "common": common_bootstrap_payload(),
"embedded": {"dashboard_id": dashboard_id_or_slug},
}
diff --git a/superset/views/database/forms.py b/superset/views/database/forms.py
index 9e3ba500af465..f8e528c4e3b63 100644
--- a/superset/views/database/forms.py
+++ b/superset/views/database/forms.py
@@ -357,10 +357,6 @@ class ExcelToDatabaseForm(UploadToDatabaseForm):
validators=[Optional(), NumberRange(min=0)],
widget=BS3TextFieldWidget(),
)
- mangle_dupe_cols = BooleanField(
- _("Mangle Duplicate Columns"),
- description=_('Specify duplicate columns as "X.0, X.1".'),
- )
skiprows = IntegerField(
_("Skip Rows"),
description=_("Number of rows to skip at start of file."),
diff --git a/superset/views/database/validators.py b/superset/views/database/validators.py
index 2ee49c8210736..e4fef3446c055 100644
--- a/superset/views/database/validators.py
+++ b/superset/views/database/validators.py
@@ -21,7 +21,7 @@
from marshmallow import ValidationError
from superset import security_manager
-from superset.databases.commands.exceptions import DatabaseInvalidError
+from superset.commands.database.exceptions import DatabaseInvalidError
from superset.databases.utils import make_url_safe
from superset.models.core import Database
diff --git a/superset/views/database/views.py b/superset/views/database/views.py
index 0a91df2d6f087..9f54ae8b78a37 100644
--- a/superset/views/database/views.py
+++ b/superset/views/database/views.py
@@ -307,7 +307,6 @@ class ExcelToDatabaseView(SimpleFormView):
def form_get(self, form: ExcelToDatabaseForm) -> None:
form.header.data = 0
- form.mangle_dupe_cols.data = True
form.decimal.data = "."
form.if_exists.data = "fail"
form.sheet_name.data = ""
@@ -343,7 +342,7 @@ def form_post(self, form: ExcelToDatabaseForm) -> Response:
index_col=form.index_col.data,
io=form.excel_file.data,
keep_default_na=not form.null_values.data,
- na_values=form.null_values.data if form.null_values.data else None,
+ na_values=form.null_values.data if form.null_values.data else [],
parse_dates=form.parse_dates.data,
skiprows=form.skiprows.data,
sheet_name=form.sheet_name.data if form.sheet_name.data else 0,
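The `na_values` change from `None` to `[]` keeps the argument's type stable across both branches. A sketch of how it interacts with `keep_default_na` in pandas (file name and markers are placeholders):

```python
import pandas as pd

# User supplied explicit null markers: disable pandas' default NA strings
# and treat only the supplied values as missing.
df = pd.read_excel("data.xlsx", keep_default_na=False, na_values=["N/A", "-"])

# No user-supplied markers: keep pandas' defaults; an empty na_values
# list adds nothing on top of them.
df = pd.read_excel("data.xlsx", keep_default_na=True, na_values=[])
```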
diff --git a/superset/views/datasource/utils.py b/superset/views/datasource/utils.py
index 65b19c34938f3..b08d1ccc1528d 100644
--- a/superset/views/datasource/utils.py
+++ b/superset/views/datasource/utils.py
@@ -17,12 +17,12 @@
from typing import Any, Optional
from superset import app, db
+from superset.commands.dataset.exceptions import DatasetSamplesFailedError
from superset.common.chart_data import ChartDataResultType
from superset.common.query_context_factory import QueryContextFactory
from superset.common.utils.query_cache_manager import QueryCacheManager
from superset.constants import CacheRegion
from superset.daos.datasource import DatasourceDAO
-from superset.datasets.commands.exceptions import DatasetSamplesFailedError
from superset.utils.core import QueryStatus
from superset.views.datasource.schemas import SamplesPayloadSchema
@@ -43,7 +43,7 @@ def get_limit_clause(page: Optional[int], per_page: Optional[int]) -> dict[str,
return {"row_offset": offset, "row_limit": limit}
-def get_samples( # pylint: disable=too-many-arguments,too-many-locals
+def get_samples( # pylint: disable=too-many-arguments
datasource_type: str,
datasource_id: int,
force: bool = False,
@@ -104,21 +104,18 @@ def get_samples( # pylint: disable=too-many-arguments,too-many-locals
result_type=ChartDataResultType.FULL,
force=force,
)
- samples_results = samples_instance.get_payload()
- count_star_results = count_star_instance.get_payload()
try:
- sample_data = samples_results["queries"][0]
- count_star_data = count_star_results["queries"][0]
- failed_status = (
- sample_data.get("status") == QueryStatus.FAILED
- or count_star_data.get("status") == QueryStatus.FAILED
- )
- error_msg = sample_data.get("error") or count_star_data.get("error")
- if failed_status and error_msg:
- cache_key = sample_data.get("cache_key")
- QueryCacheManager.delete(cache_key, region=CacheRegion.DATA)
- raise DatasetSamplesFailedError(error_msg)
+ count_star_data = count_star_instance.get_payload()["queries"][0]
+
+ if count_star_data.get("status") == QueryStatus.FAILED:
+ raise DatasetSamplesFailedError(count_star_data.get("error"))
+
+ sample_data = samples_instance.get_payload()["queries"][0]
+
+ if sample_data.get("status") == QueryStatus.FAILED:
+ QueryCacheManager.delete(count_star_data.get("cache_key"), CacheRegion.DATA)
+ raise DatasetSamplesFailedError(sample_data.get("error"))
sample_data["page"] = page
sample_data["per_page"] = per_page
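Restated outside the Superset classes, the reordered logic above is a fail-fast pattern: run the cheap `count(*)` payload first, and only invalidate its cached entry when the expensive samples payload subsequently fails. All names below are illustrative:

```python
from typing import Any, Callable


def fetch_samples(
    count_query: Callable[[], dict],
    samples_query: Callable[[], dict],
    cache: Any,
) -> dict:
    count_data = count_query()["queries"][0]
    if count_data.get("status") == "failed":
        # Nothing cached yet worth cleaning up; bail out immediately.
        raise RuntimeError(count_data.get("error"))

    sample_data = samples_query()["queries"][0]
    if sample_data.get("status") == "failed":
        # Drop the already-cached count so the next call recomputes both.
        cache.delete(count_data.get("cache_key"))
        raise RuntimeError(sample_data.get("error"))

    return sample_data
```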
diff --git a/superset/views/datasource/views.py b/superset/views/datasource/views.py
index 56acbd85807e3..a4c158a11f300 100644
--- a/superset/views/datasource/views.py
+++ b/superset/views/datasource/views.py
@@ -28,14 +28,14 @@
from sqlalchemy.orm.exc import NoResultFound
from superset import db, event_logger, security_manager
+from superset.commands.dataset.exceptions import (
+ DatasetForbiddenError,
+ DatasetNotFoundError,
+)
from superset.commands.utils import populate_owners
from superset.connectors.sqla.models import SqlaTable
from superset.connectors.sqla.utils import get_physical_table_metadata
from superset.daos.datasource import DatasourceDAO
-from superset.datasets.commands.exceptions import (
- DatasetForbiddenError,
- DatasetNotFoundError,
-)
from superset.exceptions import SupersetException, SupersetSecurityException
from superset.models.core import Database
from superset.superset_typing import FlaskResponse
diff --git a/superset/viz.py b/superset/viz.py
index 2e697a77becf8..8ba785ddcf39e 100644
--- a/superset/viz.py
+++ b/superset/viz.py
@@ -84,7 +84,7 @@
if TYPE_CHECKING:
from superset.common.query_context_factory import QueryContextFactory
- from superset.connectors.base.models import BaseDatasource
+ from superset.connectors.sqla.models import BaseDatasource
config = app.config
stats_logger = config["STATS_LOGGER"]
diff --git a/tests/integration_tests/base_tests.py b/tests/integration_tests/base_tests.py
index 7f7c543d8b04a..0040ec60f68b3 100644
--- a/tests/integration_tests/base_tests.py
+++ b/tests/integration_tests/base_tests.py
@@ -36,8 +36,7 @@
from tests.integration_tests.test_app import app, login
from superset.sql_parse import CtasMethod
from superset import db, security_manager
-from superset.connectors.base.models import BaseDatasource
-from superset.connectors.sqla.models import SqlaTable
+from superset.connectors.sqla.models import BaseDatasource, SqlaTable
from superset.models import core as models
from superset.models.slice import Slice
from superset.models.core import Database
diff --git a/tests/integration_tests/charts/api_tests.py b/tests/integration_tests/charts/api_tests.py
index ae64eba8071ab..69888104fa4ef 100644
--- a/tests/integration_tests/charts/api_tests.py
+++ b/tests/integration_tests/charts/api_tests.py
@@ -28,8 +28,8 @@
from sqlalchemy import and_
from sqlalchemy.sql import func
-from superset.charts.commands.exceptions import ChartDataQueryFailedError
-from superset.charts.data.commands.get_data_command import ChartDataCommand
+from superset.commands.chart.data.get_data_command import ChartDataCommand
+from superset.commands.chart.exceptions import ChartDataQueryFailedError
from superset.connectors.sqla.models import SqlaTable
from superset.extensions import cache_manager, db, security_manager
from superset.models.core import Database, FavStar, FavStarClassName
@@ -981,7 +981,7 @@ def test_get_charts(self):
rv = self.get_assert_metric(uri, "get_list")
self.assertEqual(rv.status_code, 200)
data = json.loads(rv.data.decode("utf-8"))
- self.assertEqual(data["count"], 34)
+ self.assertEqual(data["count"], 33)
@pytest.mark.usefixtures("load_energy_table_with_slice", "add_dashboard_to_chart")
def test_get_charts_dashboards(self):
@@ -1447,7 +1447,7 @@ def test_get_charts_page(self):
"""
Chart API: Test get charts filter
"""
- # Assuming we have 34 sample charts
+ # Assuming we have 33 sample charts
self.login(username="admin")
arguments = {"page_size": 10, "page": 0}
uri = f"api/v1/chart/?q={prison.dumps(arguments)}"
@@ -1461,7 +1461,7 @@ def test_get_charts_page(self):
rv = self.get_assert_metric(uri, "get_list")
self.assertEqual(rv.status_code, 200)
data = json.loads(rv.data.decode("utf-8"))
- self.assertEqual(len(data["result"]), 4)
+ self.assertEqual(len(data["result"]), 3)
def test_get_charts_no_data_access(self):
"""
diff --git a/tests/integration_tests/charts/commands_tests.py b/tests/integration_tests/charts/commands_tests.py
index f9785a4dd6c20..87c7823ae5ab8 100644
--- a/tests/integration_tests/charts/commands_tests.py
+++ b/tests/integration_tests/charts/commands_tests.py
@@ -22,15 +22,15 @@
from flask import g
from superset import db, security_manager
-from superset.charts.commands.create import CreateChartCommand
-from superset.charts.commands.exceptions import (
+from superset.commands.chart.create import CreateChartCommand
+from superset.commands.chart.exceptions import (
ChartNotFoundError,
WarmUpCacheChartNotFoundError,
)
-from superset.charts.commands.export import ExportChartsCommand
-from superset.charts.commands.importers.v1 import ImportChartsCommand
-from superset.charts.commands.update import UpdateChartCommand
-from superset.charts.commands.warm_up_cache import ChartWarmUpCacheCommand
+from superset.commands.chart.export import ExportChartsCommand
+from superset.commands.chart.importers.v1 import ImportChartsCommand
+from superset.commands.chart.update import UpdateChartCommand
+from superset.commands.chart.warm_up_cache import ChartWarmUpCacheCommand
from superset.commands.exceptions import CommandInvalidError
from superset.commands.importers.exceptions import IncorrectVersionError
from superset.connectors.sqla.models import SqlaTable
@@ -171,7 +171,7 @@ def test_export_chart_command_no_related(self, mock_g):
class TestImportChartsCommand(SupersetTestCase):
- @patch("superset.charts.commands.importers.v1.utils.g")
+ @patch("superset.commands.chart.importers.v1.utils.g")
@patch("superset.security.manager.g")
def test_import_v1_chart(self, sm_g, utils_g):
"""Test that we can import a chart"""
@@ -324,7 +324,7 @@ def test_import_v1_chart_validation(self):
class TestChartsCreateCommand(SupersetTestCase):
@patch("superset.utils.core.g")
- @patch("superset.charts.commands.create.g")
+ @patch("superset.commands.chart.create.g")
@patch("superset.security.manager.g")
@pytest.mark.usefixtures("load_energy_table_with_slice")
def test_create_v1_response(self, mock_sm_g, mock_c_g, mock_u_g):
@@ -354,7 +354,7 @@ def test_create_v1_response(self, mock_sm_g, mock_c_g, mock_u_g):
class TestChartsUpdateCommand(SupersetTestCase):
- @patch("superset.charts.commands.update.g")
+ @patch("superset.commands.chart.update.g")
@patch("superset.utils.core.g")
@patch("superset.security.manager.g")
@pytest.mark.usefixtures("load_energy_table_with_slice")
diff --git a/tests/integration_tests/charts/data/api_tests.py b/tests/integration_tests/charts/data/api_tests.py
index 32a4be160c90c..4def03ff4e484 100644
--- a/tests/integration_tests/charts/data/api_tests.py
+++ b/tests/integration_tests/charts/data/api_tests.py
@@ -42,7 +42,7 @@
import pytest
from superset.models.slice import Slice
-from superset.charts.data.commands.get_data_command import ChartDataCommand
+from superset.commands.chart.data.get_data_command import ChartDataCommand
from superset.connectors.sqla.models import TableColumn, SqlaTable
from superset.errors import SupersetErrorType
from superset.extensions import async_query_manager_factory, db
@@ -1293,7 +1293,6 @@ def test_chart_cache_timeout(
slice_with_cache_timeout = load_energy_table_with_slice[0]
slice_with_cache_timeout.cache_timeout = 20
- db.session.merge(slice_with_cache_timeout)
datasource: SqlaTable = (
db.session.query(SqlaTable)
@@ -1301,7 +1300,6 @@ def test_chart_cache_timeout(
.first()
)
datasource.cache_timeout = 1254
- db.session.merge(datasource)
db.session.commit()
@@ -1331,7 +1329,6 @@ def test_chart_cache_timeout_not_present(
.first()
)
datasource.cache_timeout = 1980
- db.session.merge(datasource)
db.session.commit()
rv = test_client.post(CHART_DATA_URI, json=physical_query_context)
diff --git a/tests/integration_tests/cli_tests.py b/tests/integration_tests/cli_tests.py
index f9195a6c26684..55557ab32deac 100644
--- a/tests/integration_tests/cli_tests.py
+++ b/tests/integration_tests/cli_tests.py
@@ -137,7 +137,7 @@ def test_export_dashboards_versioned_export(app_context, fs):
"superset.cli.lib.feature_flags", {"VERSIONED_EXPORT": True}, clear=True
)
@mock.patch(
- "superset.dashboards.commands.export.ExportDashboardsCommand.run",
+ "superset.commands.dashboard.export.ExportDashboardsCommand.run",
side_effect=Exception(),
)
def test_failing_export_dashboards_versioned_export(
@@ -191,7 +191,7 @@ def test_export_datasources_versioned_export(app_context, fs):
"superset.cli.lib.feature_flags", {"VERSIONED_EXPORT": True}, clear=True
)
@mock.patch(
- "superset.dashboards.commands.export.ExportDatasetsCommand.run",
+ "superset.commands.dashboard.export.ExportDatasetsCommand.run",
side_effect=Exception(),
)
def test_failing_export_datasources_versioned_export(
@@ -217,7 +217,7 @@ def test_failing_export_datasources_versioned_export(
@mock.patch.dict(
"superset.cli.lib.feature_flags", {"VERSIONED_EXPORT": True}, clear=True
)
-@mock.patch("superset.dashboards.commands.importers.dispatcher.ImportDashboardsCommand")
+@mock.patch("superset.commands.dashboard.importers.dispatcher.ImportDashboardsCommand")
def test_import_dashboards_versioned_export(import_dashboards_command, app_context, fs):
"""
Test that both ZIP and JSON can be imported.
@@ -261,7 +261,7 @@ def test_import_dashboards_versioned_export(import_dashboards_command, app_conte
"superset.cli.lib.feature_flags", {"VERSIONED_EXPORT": True}, clear=True
)
@mock.patch(
- "superset.dashboards.commands.importers.dispatcher.ImportDashboardsCommand.run",
+ "superset.commands.dashboard.importers.dispatcher.ImportDashboardsCommand.run",
side_effect=Exception(),
)
def test_failing_import_dashboards_versioned_export(
@@ -304,7 +304,7 @@ def test_failing_import_dashboards_versioned_export(
@mock.patch.dict(
"superset.cli.lib.feature_flags", {"VERSIONED_EXPORT": True}, clear=True
)
-@mock.patch("superset.datasets.commands.importers.dispatcher.ImportDatasetsCommand")
+@mock.patch("superset.commands.dataset.importers.dispatcher.ImportDatasetsCommand")
def test_import_datasets_versioned_export(import_datasets_command, app_context, fs):
"""
Test that both ZIP and YAML can be imported.
@@ -347,7 +347,7 @@ def test_import_datasets_versioned_export(import_datasets_command, app_context,
@mock.patch.dict(
"superset.cli.lib.feature_flags", {"VERSIONED_EXPORT": False}, clear=True
)
-@mock.patch("superset.datasets.commands.importers.v0.ImportDatasetsCommand")
+@mock.patch("superset.commands.dataset.importers.v0.ImportDatasetsCommand")
def test_import_datasets_sync_argument_columns_metrics(
import_datasets_command, app_context, fs
):
@@ -384,7 +384,7 @@ def test_import_datasets_sync_argument_columns_metrics(
@mock.patch.dict(
"superset.cli.lib.feature_flags", {"VERSIONED_EXPORT": False}, clear=True
)
-@mock.patch("superset.datasets.commands.importers.v0.ImportDatasetsCommand")
+@mock.patch("superset.commands.dataset.importers.v0.ImportDatasetsCommand")
def test_import_datasets_sync_argument_columns(
import_datasets_command, app_context, fs
):
@@ -421,7 +421,7 @@ def test_import_datasets_sync_argument_columns(
@mock.patch.dict(
"superset.cli.lib.feature_flags", {"VERSIONED_EXPORT": False}, clear=True
)
-@mock.patch("superset.datasets.commands.importers.v0.ImportDatasetsCommand")
+@mock.patch("superset.commands.dataset.importers.v0.ImportDatasetsCommand")
def test_import_datasets_sync_argument_metrics(
import_datasets_command, app_context, fs
):
@@ -459,7 +459,7 @@ def test_import_datasets_sync_argument_metrics(
"superset.cli.lib.feature_flags", {"VERSIONED_EXPORT": True}, clear=True
)
@mock.patch(
- "superset.datasets.commands.importers.dispatcher.ImportDatasetsCommand.run",
+ "superset.commands.dataset.importers.dispatcher.ImportDatasetsCommand.run",
side_effect=Exception(),
)
def test_failing_import_datasets_versioned_export(
diff --git a/tests/integration_tests/conftest.py b/tests/integration_tests/conftest.py
index 28da7b79133b1..3e6aa963072b1 100644
--- a/tests/integration_tests/conftest.py
+++ b/tests/integration_tests/conftest.py
@@ -326,7 +326,8 @@ def virtual_dataset():
TableColumn(column_name="col5", type="VARCHAR(255)", table=dataset)
SqlMetric(metric_name="count", expression="count(*)", table=dataset)
- db.session.merge(dataset)
+ db.session.add(dataset)
+ db.session.commit()
yield dataset
@@ -390,7 +391,7 @@ def physical_dataset():
table=dataset,
)
SqlMetric(metric_name="count", expression="count(*)", table=dataset)
- db.session.merge(dataset)
+ db.session.add(dataset)
db.session.commit()
yield dataset
@@ -425,7 +426,8 @@ def virtual_dataset_comma_in_column_value():
TableColumn(column_name="col2", type="VARCHAR(255)", table=dataset)
SqlMetric(metric_name="count", expression="count(*)", table=dataset)
- db.session.merge(dataset)
+ db.session.add(dataset)
+ db.session.commit()
yield dataset
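These fixture edits all follow from the same SQLAlchemy distinction: `Session.add()` registers the instance you just constructed, while `Session.merge()` copies its state onto a separate persistent copy and returns it, which is slower and unnecessary for objects the session already tracks. A self-contained sketch with a hypothetical model:

```python
from sqlalchemy import Integer, String, create_engine
from sqlalchemy.orm import DeclarativeBase, Mapped, Session, mapped_column


class Base(DeclarativeBase):
    pass


class Dataset(Base):  # hypothetical stand-in for SqlaTable
    __tablename__ = "dataset"
    id: Mapped[int] = mapped_column(Integer, primary_key=True)
    name: Mapped[str] = mapped_column(String(50))


engine = create_engine("sqlite://")
Base.metadata.create_all(engine)

with Session(engine) as session:
    ds = Dataset(name="example")
    session.add(ds)      # new object: add() is the right call
    session.commit()

    ds.name = "renamed"  # already tracked: a plain commit persists it;
    session.commit()     # session.merge(ds) would only copy state around
```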
diff --git a/tests/integration_tests/core_tests.py b/tests/integration_tests/core_tests.py
index 3157ddd649e34..c4a0897332b38 100644
--- a/tests/integration_tests/core_tests.py
+++ b/tests/integration_tests/core_tests.py
@@ -35,8 +35,8 @@
import superset.utils.database
import superset.views.utils
from superset import dataframe, db, security_manager, sql_lab
-from superset.charts.commands.exceptions import ChartDataQueryFailedError
-from superset.charts.data.commands.get_data_command import ChartDataCommand
+from superset.commands.chart.data.get_data_command import ChartDataCommand
+from superset.commands.chart.exceptions import ChartDataQueryFailedError
from superset.common.db_query_status import QueryStatus
from superset.connectors.sqla.models import SqlaTable
from superset.db_engine_specs.base import BaseEngineSpec
@@ -713,10 +713,17 @@ def test_explore_json_async(self):
data = json.loads(rv.data.decode("utf-8"))
keys = list(data.keys())
- self.assertEqual(rv.status_code, 202)
- self.assertCountEqual(
- keys, ["channel_id", "job_id", "user_id", "status", "errors", "result_url"]
- )
+ # If the chart is cached the endpoint returns 200, otherwise 202
+ assert rv.status_code in {200, 202}
+ if rv.status_code == 202:
+ assert keys == [
+ "channel_id",
+ "job_id",
+ "user_id",
+ "status",
+ "errors",
+ "result_url",
+ ]
@pytest.mark.usefixtures("load_birth_names_dashboard_with_slices")
@mock.patch.dict(
@@ -1164,7 +1171,7 @@ def test_dashboard_injected_exceptions(self, mock_db_connection_mutator):
self.assertIn("Error message", data)
@pytest.mark.usefixtures("load_energy_table_with_slice")
- @mock.patch("superset.explore.form_data.commands.create.CreateFormDataCommand.run")
+ @mock.patch("superset.commands.explore.form_data.create.CreateFormDataCommand.run")
def test_explore_redirect(self, mock_command: mock.Mock):
self.login(username="admin")
random_key = "random_key"
diff --git a/tests/integration_tests/css_templates/api_tests.py b/tests/integration_tests/css_templates/api_tests.py
index b28cca955ca8d..ceb46f553b1aa 100644
--- a/tests/integration_tests/css_templates/api_tests.py
+++ b/tests/integration_tests/css_templates/api_tests.py
@@ -19,6 +19,8 @@
import json
import pytest
import prison
+from datetime import datetime
+from freezegun import freeze_time
from sqlalchemy.sql import func
import tests.integration_tests.test_app
@@ -189,20 +191,27 @@ def test_get_css_template(self):
"""
CSS Template API: Test get CSS Template
"""
- css_template = (
- db.session.query(CssTemplate)
- .filter(CssTemplate.template_name == "template_name1")
- .one_or_none()
- )
- self.login(username="admin")
- uri = f"api/v1/css_template/{css_template.id}"
- rv = self.get_assert_metric(uri, "get")
+ with freeze_time(datetime.now()):
+ css_template = (
+ db.session.query(CssTemplate)
+ .filter(CssTemplate.template_name == "template_name1")
+ .one_or_none()
+ )
+ self.login(username="admin")
+ uri = f"api/v1/css_template/{css_template.id}"
+ rv = self.get_assert_metric(uri, "get")
assert rv.status_code == 200
expected_result = {
"id": css_template.id,
"template_name": "template_name1",
"css": "css1",
+ "changed_by": {
+ "first_name": css_template.created_by.first_name,
+ "id": css_template.created_by.id,
+ "last_name": css_template.created_by.last_name,
+ },
+ "changed_on_delta_humanized": "now",
"created_by": {
"first_name": css_template.created_by.first_name,
"id": css_template.created_by.id,
diff --git a/tests/integration_tests/csv_upload_tests.py b/tests/integration_tests/csv_upload_tests.py
index 9bc204ff06b45..741f4c1bc952c 100644
--- a/tests/integration_tests/csv_upload_tests.py
+++ b/tests/integration_tests/csv_upload_tests.py
@@ -165,7 +165,6 @@ def upload_excel(
"sheet_name": "Sheet1",
"if_exists": "fail",
"index_label": "test_label",
- "mangle_dupe_cols": False,
}
if schema := utils.get_example_default_schema():
form_data["schema"] = schema
diff --git a/tests/integration_tests/dashboard_tests.py b/tests/integration_tests/dashboard_tests.py
index 0df9b22267386..0275152231e69 100644
--- a/tests/integration_tests/dashboard_tests.py
+++ b/tests/integration_tests/dashboard_tests.py
@@ -78,8 +78,8 @@ def load_dashboard(self):
hidden_dash.slices = [slice]
hidden_dash.published = False
- db.session.merge(published_dash)
- db.session.merge(hidden_dash)
+ db.session.add(published_dash)
+ db.session.add(hidden_dash)
yield db.session.commit()
self.revoke_public_access_to_table(table)
@@ -137,8 +137,6 @@ def test_public_user_dashboard_access(self):
# Make the births dash published so it can be seen
births_dash = db.session.query(Dashboard).filter_by(slug="births").one()
births_dash.published = True
-
- db.session.merge(births_dash)
db.session.commit()
# Try access before adding appropriate permissions.
@@ -180,7 +178,6 @@ def test_dashboard_with_created_by_can_be_accessed_by_public_users(self):
dash = db.session.query(Dashboard).filter_by(slug="births").first()
dash.owners = [security_manager.find_user("admin")]
dash.created_by = security_manager.find_user("admin")
- db.session.merge(dash)
db.session.commit()
res: Response = self.client.get("/superset/dashboard/births/")
diff --git a/tests/integration_tests/dashboard_utils.py b/tests/integration_tests/dashboard_utils.py
index c08a3ec292b33..41dd8dc97812e 100644
--- a/tests/integration_tests/dashboard_utils.py
+++ b/tests/integration_tests/dashboard_utils.py
@@ -59,11 +59,11 @@ def create_table_metadata(
normalize_columns=False,
always_filter_main_dttm=False,
)
+ db.session.add(table)
if fetch_values_predicate:
table.fetch_values_predicate = fetch_values_predicate
table.database = database
table.description = table_description
- db.session.merge(table)
db.session.commit()
return table
diff --git a/tests/integration_tests/dashboards/api_tests.py b/tests/integration_tests/dashboards/api_tests.py
index cc7bc109b4561..a5c44f9f08764 100644
--- a/tests/integration_tests/dashboards/api_tests.py
+++ b/tests/integration_tests/dashboards/api_tests.py
@@ -176,6 +176,26 @@ def test_get_dashboard_datasets(self):
expected_values = [0, 1] if backend() == "presto" else [0, 1, 2]
self.assertEqual(result[0]["column_types"], expected_values)
+ @pytest.mark.usefixtures("load_world_bank_dashboard_with_slices")
+ @patch("superset.dashboards.schemas.security_manager.has_guest_access")
+ @patch("superset.dashboards.schemas.security_manager.is_guest_user")
+ def test_get_dashboard_datasets_as_guest(self, is_guest_user, has_guest_access):
+ self.login(username="admin")
+ uri = "api/v1/dashboard/world_health/datasets"
+ is_guest_user.return_value = True
+ has_guest_access.return_value = True
+ response = self.get_assert_metric(uri, "get_datasets")
+ self.assertEqual(response.status_code, 200)
+ data = json.loads(response.data.decode("utf-8"))
+ dashboard = Dashboard.get("world_health")
+ expected_dataset_ids = {s.datasource_id for s in dashboard.slices}
+ result = data["result"]
+ actual_dataset_ids = {dataset["id"] for dataset in result}
+ self.assertEqual(actual_dataset_ids, expected_dataset_ids)
+ for dataset in result:
+ for excluded_key in ["database", "owners"]:
+ assert excluded_key not in dataset
+
@pytest.mark.usefixtures("load_world_bank_dashboard_with_slices")
def test_get_dashboard_datasets_not_found(self):
self.login(username="alpha")
@@ -409,6 +429,29 @@ def test_get_dashboard(self):
db.session.delete(dashboard)
db.session.commit()
+ @patch("superset.dashboards.schemas.security_manager.has_guest_access")
+ @patch("superset.dashboards.schemas.security_manager.is_guest_user")
+ def test_get_dashboard_as_guest(self, is_guest_user, has_guest_access):
+ """
+ Dashboard API: Test get dashboard as guest
+ """
+ admin = self.get_user("admin")
+ dashboard = self.insert_dashboard(
+ "title", "slug1", [admin.id], created_by=admin
+ )
+ is_guest_user.return_value = True
+ has_guest_access.return_value = True
+ self.login(username="admin")
+ uri = f"api/v1/dashboard/{dashboard.id}"
+ rv = self.get_assert_metric(uri, "get")
+ self.assertEqual(rv.status_code, 200)
+ data = json.loads(rv.data.decode("utf-8"))
+ for excluded_key in ["changed_by", "changed_by_name", "owners"]:
+ assert excluded_key not in data["result"]
+ # rollback changes
+ db.session.delete(dashboard)
+ db.session.commit()
+
def test_info_dashboard(self):
"""
Dashboard API: Test info
diff --git a/tests/integration_tests/dashboards/commands_tests.py b/tests/integration_tests/dashboards/commands_tests.py
index 75bdd17bcf158..175a8a3198da6 100644
--- a/tests/integration_tests/dashboards/commands_tests.py
+++ b/tests/integration_tests/dashboards/commands_tests.py
@@ -23,16 +23,16 @@
from werkzeug.utils import secure_filename
from superset import db, security_manager
-from superset.commands.exceptions import CommandInvalidError
-from superset.commands.importers.exceptions import IncorrectVersionError
-from superset.connectors.sqla.models import SqlaTable
-from superset.dashboards.commands.exceptions import DashboardNotFoundError
-from superset.dashboards.commands.export import (
+from superset.commands.dashboard.exceptions import DashboardNotFoundError
+from superset.commands.dashboard.export import (
append_charts,
ExportDashboardsCommand,
get_default_position,
)
-from superset.dashboards.commands.importers import v0, v1
+from superset.commands.dashboard.importers import v0, v1
+from superset.commands.exceptions import CommandInvalidError
+from superset.commands.importers.exceptions import IncorrectVersionError
+from superset.connectors.sqla.models import SqlaTable
from superset.models.core import Database
from superset.models.dashboard import Dashboard
from superset.models.slice import Slice
@@ -97,17 +97,11 @@ def test_export_dashboard_command(self, mock_g1, mock_g2):
"published": False,
"uuid": str(example_dashboard.uuid),
"position": {
- "CHART-36bfc934": {
- "children": [],
- "id": "CHART-36bfc934",
- "meta": {"height": 25, "sliceName": "Region Filter", "width": 2},
- "type": "CHART",
- },
"CHART-37982887": {
"children": [],
"id": "CHART-37982887",
"meta": {
- "height": 25,
+ "height": 52,
"sliceName": "World's Population",
"width": 2,
},
@@ -180,7 +174,7 @@ def test_export_dashboard_command(self, mock_g1, mock_g2):
"type": "COLUMN",
},
"COLUMN-fe3914b8": {
- "children": ["CHART-36bfc934", "CHART-37982887"],
+ "children": ["CHART-37982887"],
"id": "COLUMN-fe3914b8",
"meta": {"background": "BACKGROUND_TRANSPARENT", "width": 2},
"type": "COLUMN",
@@ -292,14 +286,16 @@ def test_export_dashboard_command_key_order(self, mock_g1, mock_g2):
]
@pytest.mark.usefixtures("load_world_bank_dashboard_with_slices")
- @patch("superset.dashboards.commands.export.suffix")
+ @patch("superset.commands.dashboard.export.suffix")
def test_append_charts(self, mock_suffix):
"""Test that orphaned charts are added to the dashboard position"""
# return deterministic IDs
mock_suffix.side_effect = (str(i) for i in itertools.count(1))
position = get_default_position("example")
- chart_1 = db.session.query(Slice).filter_by(slice_name="Region Filter").one()
+ chart_1 = (
+ db.session.query(Slice).filter_by(slice_name="World's Population").one()
+ )
new_position = append_charts(position, {chart_1})
assert new_position == {
"DASHBOARD_VERSION_KEY": "v2",
@@ -328,7 +324,7 @@ def test_append_charts(self, mock_suffix):
"meta": {
"chartId": chart_1.id,
"height": 50,
- "sliceName": "Region Filter",
+ "sliceName": "World's Population",
"uuid": str(chart_1.uuid),
"width": 4,
},
@@ -375,7 +371,7 @@ def test_append_charts(self, mock_suffix):
"meta": {
"chartId": chart_1.id,
"height": 50,
- "sliceName": "Region Filter",
+ "sliceName": "World's Population",
"uuid": str(chart_1.uuid),
"width": 4,
},
@@ -406,7 +402,7 @@ def test_append_charts(self, mock_suffix):
"meta": {
"chartId": chart_1.id,
"height": 50,
- "sliceName": "Region Filter",
+ "sliceName": "World's Population",
"uuid": str(chart_1.uuid),
"width": 4,
},
@@ -490,7 +486,7 @@ def test_import_v0_dashboard_cli_export(self):
db.session.delete(dataset)
db.session.commit()
- @patch("superset.dashboards.commands.importers.v1.utils.g")
+ @patch("superset.commands.dashboard.importers.v1.utils.g")
@patch("superset.security.manager.g")
def test_import_v1_dashboard(self, sm_g, utils_g):
"""Test that we can import a dashboard"""
diff --git a/tests/integration_tests/dashboards/dao_tests.py b/tests/integration_tests/dashboards/dao_tests.py
index 91e27af3b65ea..65fc9e32dd534 100644
--- a/tests/integration_tests/dashboards/dao_tests.py
+++ b/tests/integration_tests/dashboards/dao_tests.py
@@ -33,60 +33,6 @@
class TestDashboardDAO(SupersetTestCase):
- @pytest.mark.usefixtures("load_world_bank_dashboard_with_slices")
- def test_set_dash_metadata(self):
- dash: Dashboard = (
- db.session.query(Dashboard).filter_by(slug="world_health").first()
- )
- data = dash.data
- positions = data["position_json"]
- data.update({"positions": positions})
- original_data = copy.deepcopy(data)
-
- # add filter scopes
- filter_slice = next(slc for slc in dash.slices if slc.viz_type == "filter_box")
- immune_slices = [slc for slc in dash.slices if slc != filter_slice]
- filter_scopes = {
- str(filter_slice.id): {
- "region": {
- "scope": ["ROOT_ID"],
- "immune": [slc.id for slc in immune_slices],
- }
- }
- }
- data.update({"filter_scopes": json.dumps(filter_scopes)})
- DashboardDAO.set_dash_metadata(dash, data)
- updated_metadata = json.loads(dash.json_metadata)
- self.assertEqual(updated_metadata["filter_scopes"], filter_scopes)
-
- # remove a slice and change slice ids (as copy slices)
- removed_slice = immune_slices.pop()
- removed_components = [
- key
- for (key, value) in positions.items()
- if isinstance(value, dict)
- and value.get("type") == "CHART"
- and value["meta"]["chartId"] == removed_slice.id
- ]
- for component_id in removed_components:
- del positions[component_id]
-
- data.update({"positions": positions})
- DashboardDAO.set_dash_metadata(dash, data)
- updated_metadata = json.loads(dash.json_metadata)
- expected_filter_scopes = {
- str(filter_slice.id): {
- "region": {
- "scope": ["ROOT_ID"],
- "immune": [slc.id for slc in immune_slices],
- }
- }
- }
- self.assertEqual(updated_metadata["filter_scopes"], expected_filter_scopes)
-
- # reset dash to original data
- DashboardDAO.set_dash_metadata(dash, original_data)
-
@pytest.mark.usefixtures("load_world_bank_dashboard_with_slices")
@patch("superset.utils.core.g")
@patch("superset.security.manager.g")
@@ -113,7 +59,6 @@ def test_get_dashboard_changed_on(self, mock_sm_g, mock_g):
data.update({"foo": "bar"})
DashboardDAO.set_dash_metadata(dashboard, data)
- db.session.merge(dashboard)
db.session.commit()
new_changed_on = DashboardDAO.get_dashboard_changed_on(dashboard)
assert old_changed_on.replace(microsecond=0) < new_changed_on
@@ -125,7 +70,6 @@ def test_get_dashboard_changed_on(self, mock_sm_g, mock_g):
)
DashboardDAO.set_dash_metadata(dashboard, original_data)
- db.session.merge(dashboard)
db.session.commit()
@pytest.mark.usefixtures("load_world_bank_dashboard_with_slices")
diff --git a/tests/integration_tests/dashboards/dashboard_test_utils.py b/tests/integration_tests/dashboards/dashboard_test_utils.py
index ee8001cdba78f..39bce02caa37c 100644
--- a/tests/integration_tests/dashboards/dashboard_test_utils.py
+++ b/tests/integration_tests/dashboards/dashboard_test_utils.py
@@ -110,12 +110,10 @@ def random_str():
def grant_access_to_dashboard(dashboard, role_name):
role = security_manager.find_role(role_name)
dashboard.roles.append(role)
- db.session.merge(dashboard)
db.session.commit()
def revoke_access_to_dashboard(dashboard, role_name):
role = security_manager.find_role(role_name)
dashboard.roles.remove(role)
- db.session.merge(dashboard)
db.session.commit()
diff --git a/tests/integration_tests/dashboards/filter_state/api_tests.py b/tests/integration_tests/dashboards/filter_state/api_tests.py
index 15b479686a4ec..3538e14012f23 100644
--- a/tests/integration_tests/dashboards/filter_state/api_tests.py
+++ b/tests/integration_tests/dashboards/filter_state/api_tests.py
@@ -22,10 +22,10 @@
from flask_appbuilder.security.sqla.models import User
from sqlalchemy.orm import Session
-from superset.dashboards.commands.exceptions import DashboardAccessDeniedError
+from superset.commands.dashboard.exceptions import DashboardAccessDeniedError
+from superset.commands.temporary_cache.entry import Entry
from superset.extensions import cache_manager
from superset.models.dashboard import Dashboard
-from superset.temporary_cache.commands.entry import Entry
from superset.temporary_cache.utils import cache_key
from tests.integration_tests.fixtures.world_bank_dashboard import (
load_world_bank_dashboard_with_slices,
diff --git a/tests/integration_tests/dashboards/permalink/api_tests.py b/tests/integration_tests/dashboards/permalink/api_tests.py
index 3c560a4469d4b..a49f1e6f4c0d0 100644
--- a/tests/integration_tests/dashboards/permalink/api_tests.py
+++ b/tests/integration_tests/dashboards/permalink/api_tests.py
@@ -23,7 +23,7 @@
from sqlalchemy.orm import Session
from superset import db
-from superset.dashboards.commands.exceptions import DashboardAccessDeniedError
+from superset.commands.dashboard.exceptions import DashboardAccessDeniedError
from superset.key_value.models import KeyValueEntry
from superset.key_value.types import KeyValueResource
from superset.key_value.utils import decode_permalink_id
diff --git a/tests/integration_tests/dashboards/security/security_dataset_tests.py b/tests/integration_tests/dashboards/security/security_dataset_tests.py
index 54e8b81442970..4ccfa981b197a 100644
--- a/tests/integration_tests/dashboards/security/security_dataset_tests.py
+++ b/tests/integration_tests/dashboards/security/security_dataset_tests.py
@@ -61,8 +61,8 @@ def load_dashboard(self):
hidden_dash.slices = [slice]
hidden_dash.published = False
- db.session.merge(published_dash)
- db.session.merge(hidden_dash)
+ db.session.add(published_dash)
+ db.session.add(hidden_dash)
yield db.session.commit()
self.revoke_public_access_to_table(table)
@@ -192,4 +192,4 @@ def test_get_dashboards_api_no_data_access(self):
self.assert200(rv)
data = json.loads(rv.data.decode("utf-8"))
self.assertEqual(0, data["count"])
- DashboardDAO.delete(dashboard)
+ DashboardDAO.delete([dashboard])
diff --git a/tests/integration_tests/dashboards/security/security_rbac_tests.py b/tests/integration_tests/dashboards/security/security_rbac_tests.py
index 8b7f2ad1ef055..792c9d1716d31 100644
--- a/tests/integration_tests/dashboards/security/security_rbac_tests.py
+++ b/tests/integration_tests/dashboards/security/security_rbac_tests.py
@@ -21,8 +21,8 @@
import pytest
+from superset.commands.dashboard.exceptions import DashboardForbiddenError
from superset.daos.dashboard import DashboardDAO
-from superset.dashboards.commands.exceptions import DashboardForbiddenError
from superset.utils.core import backend, override_user
from tests.integration_tests.conftest import with_feature_flags
from tests.integration_tests.dashboards.dashboard_test_utils import *
diff --git a/tests/integration_tests/databases/api_tests.py b/tests/integration_tests/databases/api_tests.py
index cbdacc8f34dac..0bc1f245a1f7b 100644
--- a/tests/integration_tests/databases/api_tests.py
+++ b/tests/integration_tests/databases/api_tests.py
@@ -197,6 +197,7 @@ def test_get_items(self):
"allows_subquery",
"allows_virtual_table_explore",
"backend",
+ "changed_by",
"changed_on",
"changed_on_delta_humanized",
"created_by",
@@ -288,9 +289,9 @@ def test_create_database(self):
db.session.commit()
@mock.patch(
- "superset.databases.commands.test_connection.TestConnectionDatabaseCommand.run",
+ "superset.commands.database.test_connection.TestConnectionDatabaseCommand.run",
)
- @mock.patch("superset.databases.commands.create.is_feature_enabled")
+ @mock.patch("superset.commands.database.create.is_feature_enabled")
@mock.patch(
"superset.models.core.Database.get_all_schema_names",
)
@@ -336,10 +337,10 @@ def test_create_database_with_ssh_tunnel(
db.session.commit()
@mock.patch(
- "superset.databases.commands.test_connection.TestConnectionDatabaseCommand.run",
+ "superset.commands.database.test_connection.TestConnectionDatabaseCommand.run",
)
- @mock.patch("superset.databases.commands.create.is_feature_enabled")
- @mock.patch("superset.databases.commands.update.is_feature_enabled")
+ @mock.patch("superset.commands.database.create.is_feature_enabled")
+ @mock.patch("superset.commands.database.update.is_feature_enabled")
@mock.patch(
"superset.models.core.Database.get_all_schema_names",
)
@@ -397,10 +398,10 @@ def test_update_database_with_ssh_tunnel(
db.session.commit()
@mock.patch(
- "superset.databases.commands.test_connection.TestConnectionDatabaseCommand.run",
+ "superset.commands.database.test_connection.TestConnectionDatabaseCommand.run",
)
- @mock.patch("superset.databases.commands.create.is_feature_enabled")
- @mock.patch("superset.databases.commands.update.is_feature_enabled")
+ @mock.patch("superset.commands.database.create.is_feature_enabled")
+ @mock.patch("superset.commands.database.update.is_feature_enabled")
@mock.patch(
"superset.models.core.Database.get_all_schema_names",
)
@@ -477,12 +478,12 @@ def test_update_ssh_tunnel_via_database_api(
db.session.commit()
@mock.patch(
- "superset.databases.commands.test_connection.TestConnectionDatabaseCommand.run",
+ "superset.commands.database.test_connection.TestConnectionDatabaseCommand.run",
)
@mock.patch(
"superset.models.core.Database.get_all_schema_names",
)
- @mock.patch("superset.databases.commands.create.is_feature_enabled")
+ @mock.patch("superset.commands.database.create.is_feature_enabled")
def test_cascade_delete_ssh_tunnel(
self,
mock_test_connection_database_command_run,
@@ -531,9 +532,9 @@ def test_cascade_delete_ssh_tunnel(
assert model_ssh_tunnel is None
@mock.patch(
- "superset.databases.commands.test_connection.TestConnectionDatabaseCommand.run",
+ "superset.commands.database.test_connection.TestConnectionDatabaseCommand.run",
)
- @mock.patch("superset.databases.commands.create.is_feature_enabled")
+ @mock.patch("superset.commands.database.create.is_feature_enabled")
@mock.patch(
"superset.models.core.Database.get_all_schema_names",
)
@@ -582,9 +583,9 @@ def test_do_not_create_database_if_ssh_tunnel_creation_fails(
assert model is None
@mock.patch(
- "superset.databases.commands.test_connection.TestConnectionDatabaseCommand.run",
+ "superset.commands.database.test_connection.TestConnectionDatabaseCommand.run",
)
- @mock.patch("superset.databases.commands.create.is_feature_enabled")
+ @mock.patch("superset.commands.database.create.is_feature_enabled")
@mock.patch(
"superset.models.core.Database.get_all_schema_names",
)
@@ -637,7 +638,7 @@ def test_get_database_returns_related_ssh_tunnel(
db.session.commit()
@mock.patch(
- "superset.databases.commands.test_connection.TestConnectionDatabaseCommand.run",
+ "superset.commands.database.test_connection.TestConnectionDatabaseCommand.run",
)
@mock.patch(
"superset.models.core.Database.get_all_schema_names",
@@ -2005,10 +2006,10 @@ def test_test_connection_unsafe_uri(self):
app.config["PREVENT_UNSAFE_DB_CONNECTIONS"] = False
@mock.patch(
- "superset.databases.commands.test_connection.DatabaseDAO.build_db_for_connection_test",
+ "superset.commands.database.test_connection.DatabaseDAO.build_db_for_connection_test",
)
@mock.patch(
- "superset.databases.commands.test_connection.event_logger",
+ "superset.commands.database.test_connection.event_logger",
)
def test_test_connection_failed_invalid_hostname(
self, mock_event_logger, mock_build_db
@@ -2074,7 +2075,7 @@ def test_get_database_related_objects(self):
rv = self.get_assert_metric(uri, "related_objects")
self.assertEqual(rv.status_code, 200)
response = json.loads(rv.data.decode("utf-8"))
- self.assertEqual(response["charts"]["count"], 34)
+ self.assertEqual(response["charts"]["count"], 33)
self.assertEqual(response["dashboards"]["count"], 3)
def test_get_database_related_objects_not_found(self):
@@ -3748,7 +3749,7 @@ def test_validate_sql_endpoint_noconfig(self):
},
)
- @patch("superset.databases.commands.validate_sql.get_validator_by_name")
+ @patch("superset.commands.database.validate_sql.get_validator_by_name")
@patch.dict(
"superset.config.SQL_VALIDATORS_BY_ENGINE",
PRESTO_SQL_VALIDATORS_BY_ENGINE,
diff --git a/tests/integration_tests/databases/commands_tests.py b/tests/integration_tests/databases/commands_tests.py
index d5946d8b6d105..b46e1b7ea3a1a 100644
--- a/tests/integration_tests/databases/commands_tests.py
+++ b/tests/integration_tests/databases/commands_tests.py
@@ -23,11 +23,8 @@
from sqlalchemy.exc import DBAPIError
from superset import db, event_logger, security_manager
-from superset.commands.exceptions import CommandInvalidError
-from superset.commands.importers.exceptions import IncorrectVersionError
-from superset.connectors.sqla.models import SqlaTable
-from superset.databases.commands.create import CreateDatabaseCommand
-from superset.databases.commands.exceptions import (
+from superset.commands.database.create import CreateDatabaseCommand
+from superset.commands.database.exceptions import (
DatabaseInvalidError,
DatabaseNotFoundError,
DatabaseSecurityUnsafeError,
@@ -35,11 +32,14 @@
DatabaseTestConnectionDriverError,
DatabaseTestConnectionUnexpectedError,
)
-from superset.databases.commands.export import ExportDatabasesCommand
-from superset.databases.commands.importers.v1 import ImportDatabasesCommand
-from superset.databases.commands.tables import TablesDatabaseCommand
-from superset.databases.commands.test_connection import TestConnectionDatabaseCommand
-from superset.databases.commands.validate import ValidateDatabaseParametersCommand
+from superset.commands.database.export import ExportDatabasesCommand
+from superset.commands.database.importers.v1 import ImportDatabasesCommand
+from superset.commands.database.tables import TablesDatabaseCommand
+from superset.commands.database.test_connection import TestConnectionDatabaseCommand
+from superset.commands.database.validate import ValidateDatabaseParametersCommand
+from superset.commands.exceptions import CommandInvalidError
+from superset.commands.importers.exceptions import IncorrectVersionError
+from superset.connectors.sqla.models import SqlaTable
from superset.databases.schemas import DatabaseTestConnectionSchema
from superset.databases.ssh_tunnel.models import SSHTunnel
from superset.errors import ErrorLevel, SupersetError, SupersetErrorType
@@ -75,7 +75,7 @@
class TestCreateDatabaseCommand(SupersetTestCase):
- @patch("superset.databases.commands.test_connection.event_logger.log_with_context")
+ @patch("superset.commands.database.test_connection.event_logger.log_with_context")
@patch("superset.utils.core.g")
def test_create_duplicate_error(self, mock_g, mock_logger):
example_db = get_example_database()
@@ -94,7 +94,7 @@ def test_create_duplicate_error(self, mock_g, mock_logger):
"DatabaseRequiredFieldValidationError"
)
- @patch("superset.databases.commands.test_connection.event_logger.log_with_context")
+ @patch("superset.commands.database.test_connection.event_logger.log_with_context")
@patch("superset.utils.core.g")
def test_multiple_error_logging(self, mock_g, mock_logger):
mock_g.user = security_manager.find_user("admin")
@@ -834,7 +834,7 @@ def test_import_v1_database_masked_ssh_tunnel_only_priv_key_psswd(
}
}
- @patch("superset.databases.commands.importers.v1.import_dataset")
+ @patch("superset.commands.database.importers.v1.import_dataset")
def test_import_v1_rollback(self, mock_import_dataset):
"""Test than on an exception everything is rolled back"""
num_databases = db.session.query(Database).count()
@@ -860,7 +860,7 @@ def test_import_v1_rollback(self, mock_import_dataset):
class TestTestConnectionDatabaseCommand(SupersetTestCase):
@patch("superset.daos.database.Database._get_sqla_engine")
- @patch("superset.databases.commands.test_connection.event_logger.log_with_context")
+ @patch("superset.commands.database.test_connection.event_logger.log_with_context")
@patch("superset.utils.core.g")
def test_connection_db_exception(
self, mock_g, mock_event_logger, mock_get_sqla_engine
@@ -881,7 +881,7 @@ def test_connection_db_exception(
mock_event_logger.assert_called()
@patch("superset.daos.database.Database._get_sqla_engine")
- @patch("superset.databases.commands.test_connection.event_logger.log_with_context")
+ @patch("superset.commands.database.test_connection.event_logger.log_with_context")
@patch("superset.utils.core.g")
def test_connection_do_ping_exception(
self, mock_g, mock_event_logger, mock_get_sqla_engine
@@ -903,8 +903,8 @@ def test_connection_do_ping_exception(
== SupersetErrorType.GENERIC_DB_ENGINE_ERROR
)
- @patch("superset.databases.commands.test_connection.func_timeout")
- @patch("superset.databases.commands.test_connection.event_logger.log_with_context")
+ @patch("superset.commands.database.test_connection.func_timeout")
+ @patch("superset.commands.database.test_connection.event_logger.log_with_context")
@patch("superset.utils.core.g")
def test_connection_do_ping_timeout(
self, mock_g, mock_event_logger, mock_func_timeout
@@ -926,7 +926,7 @@ def test_connection_do_ping_timeout(
)
@patch("superset.daos.database.Database._get_sqla_engine")
- @patch("superset.databases.commands.test_connection.event_logger.log_with_context")
+ @patch("superset.commands.database.test_connection.event_logger.log_with_context")
@patch("superset.utils.core.g")
def test_connection_superset_security_connection(
self, mock_g, mock_event_logger, mock_get_sqla_engine
@@ -949,7 +949,7 @@ def test_connection_superset_security_connection(
mock_event_logger.assert_called()
@patch("superset.daos.database.Database._get_sqla_engine")
- @patch("superset.databases.commands.test_connection.event_logger.log_with_context")
+ @patch("superset.commands.database.test_connection.event_logger.log_with_context")
@patch("superset.utils.core.g")
def test_connection_db_api_exc(
self, mock_g, mock_event_logger, mock_get_sqla_engine
@@ -975,7 +975,7 @@ def test_connection_db_api_exc(
@patch("superset.db_engine_specs.base.is_hostname_valid")
@patch("superset.db_engine_specs.base.is_port_open")
-@patch("superset.databases.commands.validate.DatabaseDAO")
+@patch("superset.commands.database.validate.DatabaseDAO")
def test_validate(DatabaseDAO, is_port_open, is_hostname_valid, app_context):
"""
Test parameter validation.
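The import shuffles above follow mechanically from the rename. A small, hypothetical helper capturing the mapping this PR applies across the test suite (module names taken from the hunks; the function itself is illustrative, not part of the change):

    # Old layout: superset.<plural entity>.commands.<module>
    # New layout: superset.commands.<singular entity>.<module>
    OLD_TO_NEW = {
        "superset.databases.commands": "superset.commands.database",
        "superset.datasets.commands": "superset.commands.dataset",
        "superset.reports.commands": "superset.commands.report",
        "superset.sqllab.commands": "superset.commands.sql_lab",
        "superset.key_value.commands": "superset.commands.key_value",
    }

    def migrate_target(path: str) -> str:
        """Rewrite a dotted import or patch target to the new package."""
        for old, new in OLD_TO_NEW.items():
            if path.startswith(old):
                return new + path[len(old):]
        return path

    assert migrate_target(
        "superset.databases.commands.export.ExportDatabasesCommand"
    ) == "superset.commands.database.export.ExportDatabasesCommand"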
diff --git a/tests/integration_tests/databases/ssh_tunnel/commands/commands_tests.py b/tests/integration_tests/databases/ssh_tunnel/commands/commands_tests.py
index 64bc0d85725ff..1cd9afcc809c7 100644
--- a/tests/integration_tests/databases/ssh_tunnel/commands/commands_tests.py
+++ b/tests/integration_tests/databases/ssh_tunnel/commands/commands_tests.py
@@ -20,13 +20,13 @@
import pytest
from superset import security_manager
-from superset.databases.ssh_tunnel.commands.create import CreateSSHTunnelCommand
-from superset.databases.ssh_tunnel.commands.delete import DeleteSSHTunnelCommand
-from superset.databases.ssh_tunnel.commands.exceptions import (
+from superset.commands.database.ssh_tunnel.create import CreateSSHTunnelCommand
+from superset.commands.database.ssh_tunnel.delete import DeleteSSHTunnelCommand
+from superset.commands.database.ssh_tunnel.exceptions import (
SSHTunnelInvalidError,
SSHTunnelNotFoundError,
)
-from superset.databases.ssh_tunnel.commands.update import UpdateSSHTunnelCommand
+from superset.commands.database.ssh_tunnel.update import UpdateSSHTunnelCommand
from tests.integration_tests.base_tests import SupersetTestCase
@@ -67,7 +67,7 @@ def test_update_ssh_tunnel_not_found(self, mock_g):
class TestDeleteSSHTunnelCommand(SupersetTestCase):
@mock.patch("superset.utils.core.g")
- @mock.patch("superset.databases.ssh_tunnel.commands.delete.is_feature_enabled")
+ @mock.patch("superset.commands.database.ssh_tunnel.delete.is_feature_enabled")
def test_delete_ssh_tunnel_not_found(self, mock_g, mock_delete_is_feature_enabled):
mock_g.user = security_manager.find_user("admin")
mock_delete_is_feature_enabled.return_value = True
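Same pattern for the SSH tunnel commands: the flag has to be patched in the module that uses it, because `from ... import is_feature_enabled` binds a local name at import time, and patching the definition site would leave that binding untouched. Sketch (test body elided):

    from unittest import mock

    # superset/commands/database/ssh_tunnel/delete.py imports the flag at
    # module top, so the test patches that module's binding:
    @mock.patch("superset.commands.database.ssh_tunnel.delete.is_feature_enabled")
    def test_delete_requires_flag(mock_flag):
        mock_flag.return_value = True  # behave as if SSH_TUNNELING is enabled
        ...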
diff --git a/tests/integration_tests/datasets/api_tests.py b/tests/integration_tests/datasets/api_tests.py
index f060d36739c98..d969895489d9f 100644
--- a/tests/integration_tests/datasets/api_tests.py
+++ b/tests/integration_tests/datasets/api_tests.py
@@ -30,13 +30,13 @@
from sqlalchemy.sql import func
from superset import app
+from superset.commands.dataset.exceptions import DatasetCreateFailedError
from superset.connectors.sqla.models import SqlaTable, SqlMetric, TableColumn
from superset.daos.exceptions import (
DAOCreateFailedError,
DAODeleteFailedError,
DAOUpdateFailedError,
)
-from superset.datasets.commands.exceptions import DatasetCreateFailedError
from superset.datasets.models import Dataset
from superset.extensions import db, security_manager
from superset.models.core import Database
@@ -2458,7 +2458,7 @@ def test_get_or_create_dataset_database_not_found(self):
response = json.loads(rv.data.decode("utf-8"))
self.assertEqual(response["message"], {"database": ["Database does not exist"]})
- @patch("superset.datasets.commands.create.CreateDatasetCommand.run")
+ @patch("superset.commands.dataset.create.CreateDatasetCommand.run")
def test_get_or_create_dataset_create_fails(self, command_run_mock):
"""
Dataset API: Test get or create endpoint when create fails
diff --git a/tests/integration_tests/datasets/commands_tests.py b/tests/integration_tests/datasets/commands_tests.py
index a718c81e29cde..1ea554a81838f 100644
--- a/tests/integration_tests/datasets/commands_tests.py
+++ b/tests/integration_tests/datasets/commands_tests.py
@@ -23,19 +23,19 @@
from sqlalchemy.exc import SQLAlchemyError
from superset import db, security_manager
-from superset.commands.exceptions import CommandInvalidError
-from superset.commands.importers.exceptions import IncorrectVersionError
-from superset.connectors.sqla.models import SqlaTable
-from superset.databases.commands.importers.v1 import ImportDatabasesCommand
-from superset.datasets.commands.create import CreateDatasetCommand
-from superset.datasets.commands.exceptions import (
+from superset.commands.database.importers.v1 import ImportDatabasesCommand
+from superset.commands.dataset.create import CreateDatasetCommand
+from superset.commands.dataset.exceptions import (
DatasetInvalidError,
DatasetNotFoundError,
WarmUpCacheTableNotFoundError,
)
-from superset.datasets.commands.export import ExportDatasetsCommand
-from superset.datasets.commands.importers import v0, v1
-from superset.datasets.commands.warm_up_cache import DatasetWarmUpCacheCommand
+from superset.commands.dataset.export import ExportDatasetsCommand
+from superset.commands.dataset.importers import v0, v1
+from superset.commands.dataset.warm_up_cache import DatasetWarmUpCacheCommand
+from superset.commands.exceptions import CommandInvalidError
+from superset.commands.importers.exceptions import IncorrectVersionError
+from superset.connectors.sqla.models import SqlaTable
from superset.models.core import Database
from superset.models.slice import Slice
from superset.utils.core import get_example_default_schema
@@ -339,7 +339,7 @@ def test_import_v0_dataset_ui_export(self):
db.session.delete(dataset)
db.session.commit()
- @patch("superset.datasets.commands.importers.v1.utils.g")
+ @patch("superset.commands.dataset.importers.v1.utils.g")
@patch("superset.security.manager.g")
@pytest.mark.usefixtures("load_energy_table_with_slice")
def test_import_v1_dataset(self, sm_g, utils_g):
diff --git a/tests/integration_tests/datasource_tests.py b/tests/integration_tests/datasource_tests.py
index c2865f7b63f6a..5ab81b58d12cd 100644
--- a/tests/integration_tests/datasource_tests.py
+++ b/tests/integration_tests/datasource_tests.py
@@ -24,11 +24,11 @@
import pytest
from superset import app, db
+from superset.commands.dataset.exceptions import DatasetNotFoundError
from superset.common.utils.query_cache_manager import QueryCacheManager
from superset.connectors.sqla.models import SqlaTable, SqlMetric, TableColumn
from superset.constants import CacheRegion
from superset.daos.exceptions import DatasourceNotFound, DatasourceTypeNotSupportedError
-from superset.datasets.commands.exceptions import DatasetNotFoundError
from superset.exceptions import SupersetGenericDBErrorException
from superset.models.core import Database
from superset.utils.core import backend, get_example_default_schema
@@ -550,7 +550,6 @@ def test_get_samples_with_incorrect_cc(test_client, login_as_admin, virtual_data
table=virtual_dataset,
expression="INCORRECT SQL",
)
- db.session.merge(virtual_dataset)
uri = (
f"/datasource/samples?datasource_id={virtual_dataset.id}&datasource_type=table"
diff --git a/tests/integration_tests/explore/api_tests.py b/tests/integration_tests/explore/api_tests.py
index 50606257c267f..e37200e310024 100644
--- a/tests/integration_tests/explore/api_tests.py
+++ b/tests/integration_tests/explore/api_tests.py
@@ -21,9 +21,9 @@
from flask_appbuilder.security.sqla.models import User
from sqlalchemy.orm import Session
+from superset.commands.explore.form_data.state import TemporaryExploreState
from superset.connectors.sqla.models import SqlaTable
from superset.explore.exceptions import DatasetAccessDeniedError
-from superset.explore.form_data.commands.state import TemporaryExploreState
from superset.extensions import cache_manager
from superset.models.slice import Slice
from tests.integration_tests.fixtures.world_bank_dashboard import (
diff --git a/tests/integration_tests/explore/form_data/api_tests.py b/tests/integration_tests/explore/form_data/api_tests.py
index 0e73d0b51656a..5dbd67d4f51d6 100644
--- a/tests/integration_tests/explore/form_data/api_tests.py
+++ b/tests/integration_tests/explore/form_data/api_tests.py
@@ -21,9 +21,9 @@
from flask_appbuilder.security.sqla.models import User
from sqlalchemy.orm import Session
+from superset.commands.dataset.exceptions import DatasetAccessDeniedError
+from superset.commands.explore.form_data.state import TemporaryExploreState
from superset.connectors.sqla.models import SqlaTable
-from superset.datasets.commands.exceptions import DatasetAccessDeniedError
-from superset.explore.form_data.commands.state import TemporaryExploreState
from superset.extensions import cache_manager
from superset.models.slice import Slice
from superset.utils.core import DatasourceType
diff --git a/tests/integration_tests/explore/form_data/commands_tests.py b/tests/integration_tests/explore/form_data/commands_tests.py
index 18dd8415f6c60..781c4fdbb261f 100644
--- a/tests/integration_tests/explore/form_data/commands_tests.py
+++ b/tests/integration_tests/explore/form_data/commands_tests.py
@@ -22,12 +22,12 @@
from superset import app, db, security, security_manager
from superset.commands.exceptions import DatasourceTypeInvalidError
+from superset.commands.explore.form_data.create import CreateFormDataCommand
+from superset.commands.explore.form_data.delete import DeleteFormDataCommand
+from superset.commands.explore.form_data.get import GetFormDataCommand
+from superset.commands.explore.form_data.parameters import CommandParameters
+from superset.commands.explore.form_data.update import UpdateFormDataCommand
from superset.connectors.sqla.models import SqlaTable
-from superset.explore.form_data.commands.create import CreateFormDataCommand
-from superset.explore.form_data.commands.delete import DeleteFormDataCommand
-from superset.explore.form_data.commands.get import GetFormDataCommand
-from superset.explore.form_data.commands.parameters import CommandParameters
-from superset.explore.form_data.commands.update import UpdateFormDataCommand
from superset.models.slice import Slice
from superset.models.sql_lab import Query
from superset.utils.core import DatasourceType, get_example_default_schema
diff --git a/tests/integration_tests/explore/permalink/commands_tests.py b/tests/integration_tests/explore/permalink/commands_tests.py
index eace978d78f26..5402a419bc05a 100644
--- a/tests/integration_tests/explore/permalink/commands_tests.py
+++ b/tests/integration_tests/explore/permalink/commands_tests.py
@@ -21,10 +21,10 @@
from superset import app, db, security, security_manager
from superset.commands.exceptions import DatasourceTypeInvalidError
+from superset.commands.explore.form_data.parameters import CommandParameters
+from superset.commands.explore.permalink.create import CreateExplorePermalinkCommand
+from superset.commands.explore.permalink.get import GetExplorePermalinkCommand
from superset.connectors.sqla.models import SqlaTable
-from superset.explore.form_data.commands.parameters import CommandParameters
-from superset.explore.permalink.commands.create import CreateExplorePermalinkCommand
-from superset.explore.permalink.commands.get import GetExplorePermalinkCommand
from superset.key_value.utils import decode_permalink_id
from superset.models.slice import Slice
from superset.models.sql_lab import Query
@@ -138,8 +138,8 @@ def test_get_permalink_command(self, mock_g):
assert cache_data.get("datasource") == datasource
@patch("superset.security.manager.g")
- @patch("superset.key_value.commands.get.GetKeyValueCommand.run")
- @patch("superset.explore.permalink.commands.get.decode_permalink_id")
+ @patch("superset.commands.key_value.get.GetKeyValueCommand.run")
+ @patch("superset.commands.explore.permalink.get.decode_permalink_id")
@pytest.mark.usefixtures("create_dataset", "create_slice")
def test_get_permalink_command_with_old_dataset_key(
self, decode_id_mock, get_kv_command_mock, mock_g
diff --git a/tests/integration_tests/fixtures/energy_dashboard.py b/tests/integration_tests/fixtures/energy_dashboard.py
index 5b4690f572de4..9687fb4aff752 100644
--- a/tests/integration_tests/fixtures/energy_dashboard.py
+++ b/tests/integration_tests/fixtures/energy_dashboard.py
@@ -82,8 +82,6 @@ def _create_energy_table() -> list[Slice]:
table.metrics.append(
SqlMetric(metric_name="sum__value", expression=f"SUM({col})")
)
- db.session.merge(table)
- db.session.commit()
table.fetch_metadata()
slices = []
diff --git a/tests/integration_tests/import_export_tests.py b/tests/integration_tests/import_export_tests.py
index 5dc8143f77616..c195e3a4cb31e 100644
--- a/tests/integration_tests/import_export_tests.py
+++ b/tests/integration_tests/import_export_tests.py
@@ -32,12 +32,12 @@
load_energy_table_data,
)
from tests.integration_tests.test_app import app
-from superset.dashboards.commands.importers.v0 import decode_dashboards
+from superset.commands.dashboard.importers.v0 import decode_dashboards
from superset import db, security_manager
from superset.connectors.sqla.models import SqlaTable, SqlMetric, TableColumn
-from superset.dashboards.commands.importers.v0 import import_chart, import_dashboard
-from superset.datasets.commands.importers.v0 import import_dataset
+from superset.commands.dashboard.importers.v0 import import_chart, import_dashboard
+from superset.commands.dataset.importers.v0 import import_dataset
from superset.models.dashboard import Dashboard
from superset.models.slice import Slice
from superset.utils.core import DatasourceType, get_example_default_schema
diff --git a/tests/integration_tests/importexport/commands_tests.py b/tests/integration_tests/importexport/commands_tests.py
index ceaf0975659b3..9e8f79026057f 100644
--- a/tests/integration_tests/importexport/commands_tests.py
+++ b/tests/integration_tests/importexport/commands_tests.py
@@ -21,7 +21,7 @@
from freezegun import freeze_time
from superset import security_manager
-from superset.databases.commands.export import ExportDatabasesCommand
+from superset.commands.database.export import ExportDatabasesCommand
from superset.utils.database import get_example_database
from tests.integration_tests.base_tests import SupersetTestCase
diff --git a/tests/integration_tests/key_value/commands/create_test.py b/tests/integration_tests/key_value/commands/create_test.py
index a2ee3d13aed22..494456fa0c51a 100644
--- a/tests/integration_tests/key_value/commands/create_test.py
+++ b/tests/integration_tests/key_value/commands/create_test.py
@@ -37,7 +37,7 @@
def test_create_id_entry(app_context: AppContext, admin: User) -> None:
- from superset.key_value.commands.create import CreateKeyValueCommand
+ from superset.commands.key_value.create import CreateKeyValueCommand
from superset.key_value.models import KeyValueEntry
with override_user(admin):
@@ -46,9 +46,7 @@ def test_create_id_entry(app_context: AppContext, admin: User) -> None:
value=JSON_VALUE,
codec=JSON_CODEC,
).run()
- entry = (
- db.session.query(KeyValueEntry).filter_by(id=key.id).autoflush(False).one()
- )
+ entry = db.session.query(KeyValueEntry).filter_by(id=key.id).one()
assert json.loads(entry.value) == JSON_VALUE
assert entry.created_by_fk == admin.id
db.session.delete(entry)
@@ -56,16 +54,14 @@ def test_create_id_entry(app_context: AppContext, admin: User) -> None:
def test_create_uuid_entry(app_context: AppContext, admin: User) -> None:
- from superset.key_value.commands.create import CreateKeyValueCommand
+ from superset.commands.key_value.create import CreateKeyValueCommand
from superset.key_value.models import KeyValueEntry
with override_user(admin):
key = CreateKeyValueCommand(
resource=RESOURCE, value=JSON_VALUE, codec=JSON_CODEC
).run()
- entry = (
- db.session.query(KeyValueEntry).filter_by(uuid=key.uuid).autoflush(False).one()
- )
+ entry = db.session.query(KeyValueEntry).filter_by(uuid=key.uuid).one()
assert json.loads(entry.value) == JSON_VALUE
assert entry.created_by_fk == admin.id
db.session.delete(entry)
@@ -73,7 +69,7 @@ def test_create_uuid_entry(app_context: AppContext, admin: User) -> None:
def test_create_fail_json_entry(app_context: AppContext, admin: User) -> None:
- from superset.key_value.commands.create import CreateKeyValueCommand
+ from superset.commands.key_value.create import CreateKeyValueCommand
with pytest.raises(KeyValueCreateFailedError):
CreateKeyValueCommand(
@@ -84,7 +80,7 @@ def test_create_fail_json_entry(app_context: AppContext, admin: User) -> None:
def test_create_pickle_entry(app_context: AppContext, admin: User) -> None:
- from superset.key_value.commands.create import CreateKeyValueCommand
+ from superset.commands.key_value.create import CreateKeyValueCommand
from superset.key_value.models import KeyValueEntry
with override_user(admin):
@@ -93,9 +89,7 @@ def test_create_pickle_entry(app_context: AppContext, admin: User) -> None:
value=PICKLE_VALUE,
codec=PICKLE_CODEC,
).run()
- entry = (
- db.session.query(KeyValueEntry).filter_by(id=key.id).autoflush(False).one()
- )
+ entry = db.session.query(KeyValueEntry).filter_by(id=key.id).one()
assert type(pickle.loads(entry.value)) == type(PICKLE_VALUE)
assert entry.created_by_fk == admin.id
db.session.delete(entry)
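The `.autoflush(False)` guards removed throughout these key-value tests existed to stop the session from flushing half-built state while the verification query ran; with the command committing its work before returning (which is what this simplification implies), there is nothing pending to flush and the plain query is equivalent. Side by side:

    # Before: suppress autoflush so the lookup cannot trigger a premature
    # flush of in-progress objects.
    entry = (
        db.session.query(KeyValueEntry)
        .filter_by(id=key.id)
        .autoflush(False)
        .one()
    )

    # After: the command has already committed, so a straight lookup is safe.
    entry = db.session.query(KeyValueEntry).filter_by(id=key.id).one()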
diff --git a/tests/integration_tests/key_value/commands/delete_test.py b/tests/integration_tests/key_value/commands/delete_test.py
index 3c4892faa6467..706aab8880881 100644
--- a/tests/integration_tests/key_value/commands/delete_test.py
+++ b/tests/integration_tests/key_value/commands/delete_test.py
@@ -58,7 +58,7 @@ def test_delete_id_entry(
admin: User,
key_value_entry: KeyValueEntry,
) -> None:
- from superset.key_value.commands.delete import DeleteKeyValueCommand
+ from superset.commands.key_value.delete import DeleteKeyValueCommand
assert DeleteKeyValueCommand(resource=RESOURCE, key=ID_KEY).run() is True
@@ -68,7 +68,7 @@ def test_delete_uuid_entry(
admin: User,
key_value_entry: KeyValueEntry,
) -> None:
- from superset.key_value.commands.delete import DeleteKeyValueCommand
+ from superset.commands.key_value.delete import DeleteKeyValueCommand
assert DeleteKeyValueCommand(resource=RESOURCE, key=UUID_KEY).run() is True
@@ -78,6 +78,6 @@ def test_delete_entry_missing(
admin: User,
key_value_entry: KeyValueEntry,
) -> None:
- from superset.key_value.commands.delete import DeleteKeyValueCommand
+ from superset.commands.key_value.delete import DeleteKeyValueCommand
assert DeleteKeyValueCommand(resource=RESOURCE, key=456).run() is False
diff --git a/tests/integration_tests/key_value/commands/get_test.py b/tests/integration_tests/key_value/commands/get_test.py
index 28a6dd73d5f04..b14c64f752ffc 100644
--- a/tests/integration_tests/key_value/commands/get_test.py
+++ b/tests/integration_tests/key_value/commands/get_test.py
@@ -38,7 +38,7 @@
def test_get_id_entry(app_context: AppContext, key_value_entry: KeyValueEntry) -> None:
- from superset.key_value.commands.get import GetKeyValueCommand
+ from superset.commands.key_value.get import GetKeyValueCommand
value = GetKeyValueCommand(resource=RESOURCE, key=ID_KEY, codec=JSON_CODEC).run()
assert value == JSON_VALUE
@@ -47,7 +47,7 @@ def test_get_id_entry(app_context: AppContext, key_value_entry: KeyValueEntry) -
def test_get_uuid_entry(
app_context: AppContext, key_value_entry: KeyValueEntry
) -> None:
- from superset.key_value.commands.get import GetKeyValueCommand
+ from superset.commands.key_value.get import GetKeyValueCommand
value = GetKeyValueCommand(resource=RESOURCE, key=UUID_KEY, codec=JSON_CODEC).run()
assert value == JSON_VALUE
@@ -57,14 +57,14 @@ def test_get_id_entry_missing(
app_context: AppContext,
key_value_entry: KeyValueEntry,
) -> None:
- from superset.key_value.commands.get import GetKeyValueCommand
+ from superset.commands.key_value.get import GetKeyValueCommand
value = GetKeyValueCommand(resource=RESOURCE, key=456, codec=JSON_CODEC).run()
assert value is None
def test_get_expired_entry(app_context: AppContext) -> None:
- from superset.key_value.commands.get import GetKeyValueCommand
+ from superset.commands.key_value.get import GetKeyValueCommand
from superset.key_value.models import KeyValueEntry
entry = KeyValueEntry(
@@ -83,7 +83,7 @@ def test_get_expired_entry(app_context: AppContext) -> None:
def test_get_future_expiring_entry(app_context: AppContext) -> None:
- from superset.key_value.commands.get import GetKeyValueCommand
+ from superset.commands.key_value.get import GetKeyValueCommand
from superset.key_value.models import KeyValueEntry
id_ = 789
diff --git a/tests/integration_tests/key_value/commands/update_test.py b/tests/integration_tests/key_value/commands/update_test.py
index 2c0fc3e31de51..62d118b19705d 100644
--- a/tests/integration_tests/key_value/commands/update_test.py
+++ b/tests/integration_tests/key_value/commands/update_test.py
@@ -45,7 +45,7 @@ def test_update_id_entry(
admin: User,
key_value_entry: KeyValueEntry,
) -> None:
- from superset.key_value.commands.update import UpdateKeyValueCommand
+ from superset.commands.key_value.update import UpdateKeyValueCommand
from superset.key_value.models import KeyValueEntry
with override_user(admin):
@@ -57,7 +57,7 @@ def test_update_id_entry(
).run()
assert key is not None
assert key.id == ID_KEY
- entry = db.session.query(KeyValueEntry).filter_by(id=ID_KEY).autoflush(False).one()
+ entry = db.session.query(KeyValueEntry).filter_by(id=ID_KEY).one()
assert json.loads(entry.value) == NEW_VALUE
assert entry.changed_by_fk == admin.id
@@ -67,7 +67,7 @@ def test_update_uuid_entry(
admin: User,
key_value_entry: KeyValueEntry,
) -> None:
- from superset.key_value.commands.update import UpdateKeyValueCommand
+ from superset.commands.key_value.update import UpdateKeyValueCommand
from superset.key_value.models import KeyValueEntry
with override_user(admin):
@@ -79,15 +79,13 @@ def test_update_uuid_entry(
).run()
assert key is not None
assert key.uuid == UUID_KEY
- entry = (
- db.session.query(KeyValueEntry).filter_by(uuid=UUID_KEY).autoflush(False).one()
- )
+ entry = db.session.query(KeyValueEntry).filter_by(uuid=UUID_KEY).one()
assert json.loads(entry.value) == NEW_VALUE
assert entry.changed_by_fk == admin.id
def test_update_missing_entry(app_context: AppContext, admin: User) -> None:
- from superset.key_value.commands.update import UpdateKeyValueCommand
+ from superset.commands.key_value.update import UpdateKeyValueCommand
with override_user(admin):
key = UpdateKeyValueCommand(
diff --git a/tests/integration_tests/key_value/commands/upsert_test.py b/tests/integration_tests/key_value/commands/upsert_test.py
index c26b66d02e7bf..b23ddaee974f6 100644
--- a/tests/integration_tests/key_value/commands/upsert_test.py
+++ b/tests/integration_tests/key_value/commands/upsert_test.py
@@ -45,7 +45,7 @@ def test_upsert_id_entry(
admin: User,
key_value_entry: KeyValueEntry,
) -> None:
- from superset.key_value.commands.upsert import UpsertKeyValueCommand
+ from superset.commands.key_value.upsert import UpsertKeyValueCommand
from superset.key_value.models import KeyValueEntry
with override_user(admin):
@@ -57,9 +57,7 @@ def test_upsert_id_entry(
).run()
assert key is not None
assert key.id == ID_KEY
- entry = (
- db.session.query(KeyValueEntry).filter_by(id=int(ID_KEY)).autoflush(False).one()
- )
+ entry = db.session.query(KeyValueEntry).filter_by(id=int(ID_KEY)).one()
assert json.loads(entry.value) == NEW_VALUE
assert entry.changed_by_fk == admin.id
@@ -69,7 +67,7 @@ def test_upsert_uuid_entry(
admin: User,
key_value_entry: KeyValueEntry,
) -> None:
- from superset.key_value.commands.upsert import UpsertKeyValueCommand
+ from superset.commands.key_value.upsert import UpsertKeyValueCommand
from superset.key_value.models import KeyValueEntry
with override_user(admin):
@@ -81,15 +79,13 @@ def test_upsert_uuid_entry(
).run()
assert key is not None
assert key.uuid == UUID_KEY
- entry = (
- db.session.query(KeyValueEntry).filter_by(uuid=UUID_KEY).autoflush(False).one()
- )
+ entry = db.session.query(KeyValueEntry).filter_by(uuid=UUID_KEY).one()
assert json.loads(entry.value) == NEW_VALUE
assert entry.changed_by_fk == admin.id
def test_upsert_missing_entry(app_context: AppContext, admin: User) -> None:
- from superset.key_value.commands.upsert import UpsertKeyValueCommand
+ from superset.commands.key_value.upsert import UpsertKeyValueCommand
from superset.key_value.models import KeyValueEntry
with override_user(admin):
diff --git a/tests/integration_tests/migrations/06e1e70058c7_migrate_legacy_area__tests.py b/tests/integration_tests/migrations/06e1e70058c7_migrate_legacy_area__tests.py
deleted file mode 100644
index f02d069b2bafb..0000000000000
--- a/tests/integration_tests/migrations/06e1e70058c7_migrate_legacy_area__tests.py
+++ /dev/null
@@ -1,99 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied. See the License for the
-# specific language governing permissions and limitations
-# under the License.
-import json
-
-from superset.app import SupersetApp
-from superset.migrations.shared.migrate_viz import MigrateAreaChart
-
-area_form_data = """{
- "adhoc_filters": [],
- "annotation_layers": [],
- "bottom_margin": "auto",
- "color_scheme": "lyftColors",
- "comparison_type": "values",
- "contribution": true,
- "datasource": "2__table",
- "extra_form_data": {},
- "granularity_sqla": "ds",
- "groupby": [
- "gender"
- ],
- "line_interpolation": "linear",
- "metrics": [
- "sum__num"
- ],
- "order_desc": true,
- "rich_tooltip": true,
- "rolling_type": "None",
- "row_limit": 10000,
- "show_brush": "auto",
- "show_controls": true,
- "show_legend": true,
- "slice_id": 165,
- "stacked_style": "stack",
- "time_grain_sqla": "P1D",
- "time_range": "No filter",
- "viz_type": "area",
- "x_axis_format": "smart_date",
- "x_axis_label": "x asix label",
- "x_axis_showminmax": false,
- "x_ticks_layout": "auto",
- "y_axis_bounds": [
- null,
- null
- ],
- "y_axis_format": "SMART_NUMBER"
-}
-"""
-
-
-def test_area_migrate(app_context: SupersetApp) -> None:
- from superset.models.slice import Slice
-
- slc = Slice(
- viz_type=MigrateAreaChart.source_viz_type,
- datasource_type="table",
- params=area_form_data,
- query_context=f'{{"form_data": {area_form_data}}}',
- )
-
- slc = MigrateAreaChart.upgrade_slice(slc)
- assert slc.viz_type == MigrateAreaChart.target_viz_type
- # verify form_data
- new_form_data = json.loads(slc.params)
- assert new_form_data["contributionMode"] == "row"
- assert "contribution" not in new_form_data
- assert new_form_data["show_extra_controls"] is True
- assert new_form_data["stack"] == "Stack"
- assert new_form_data["x_axis_title"] == "x asix label"
- assert new_form_data["x_axis_title_margin"] == 30
- assert json.dumps(new_form_data["form_data_bak"], sort_keys=True) == json.dumps(
- json.loads(area_form_data), sort_keys=True
- )
-
- # verify query_context
- new_query_context = json.loads(slc.query_context)
- assert (
- new_query_context["form_data"]["viz_type"] == MigrateAreaChart.target_viz_type
- )
-
- # downgrade
- slc = MigrateAreaChart.downgrade_slice(slc)
- assert slc.viz_type == MigrateAreaChart.source_viz_type
- assert json.dumps(json.loads(slc.params), sort_keys=True) == json.dumps(
- json.loads(area_form_data), sort_keys=True
- )
diff --git a/tests/integration_tests/migrations/c747c78868b6_migrating_legacy_treemap__tests.py b/tests/integration_tests/migrations/c747c78868b6_migrating_legacy_treemap__tests.py
index 3e9ef330924cb..e67d87fa13e09 100644
--- a/tests/integration_tests/migrations/c747c78868b6_migrating_legacy_treemap__tests.py
+++ b/tests/integration_tests/migrations/c747c78868b6_migrating_legacy_treemap__tests.py
@@ -68,7 +68,7 @@ def test_treemap_migrate(app_context: SupersetApp) -> None:
query_context=f'{{"form_data": {treemap_form_data}}}',
)
- slc = MigrateTreeMap.upgrade_slice(slc)
+ MigrateTreeMap.upgrade_slice(slc)
assert slc.viz_type == MigrateTreeMap.target_viz_type
# verify form_data
new_form_data = json.loads(slc.params)
@@ -84,7 +84,7 @@ def test_treemap_migrate(app_context: SupersetApp) -> None:
assert new_query_context["form_data"]["viz_type"] == "treemap_v2"
# downgrade
- slc = MigrateTreeMap.downgrade_slice(slc)
+ MigrateTreeMap.downgrade_slice(slc)
assert slc.viz_type == MigrateTreeMap.source_viz_type
assert json.dumps(json.loads(slc.params), sort_keys=True) == json.dumps(
json.loads(treemap_form_data), sort_keys=True
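Dropping the reassignment works because, as the unchanged assertions show, `upgrade_slice`/`downgrade_slice` mutate the `Slice` they are given; the return value was never load-bearing. In sketch form (constructor arguments abbreviated from the test above):

    slc = Slice(viz_type=MigrateTreeMap.source_viz_type, params=treemap_form_data)

    MigrateTreeMap.upgrade_slice(slc)    # mutates slc in place
    assert slc.viz_type == MigrateTreeMap.target_viz_type

    MigrateTreeMap.downgrade_slice(slc)  # restores the original form
    assert slc.viz_type == MigrateTreeMap.source_viz_type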
diff --git a/tests/integration_tests/queries/saved_queries/api_tests.py b/tests/integration_tests/queries/saved_queries/api_tests.py
index 09929e4d231bd..c51c0dcbf09ca 100644
--- a/tests/integration_tests/queries/saved_queries/api_tests.py
+++ b/tests/integration_tests/queries/saved_queries/api_tests.py
@@ -17,6 +17,7 @@
# isort:skip_file
"""Unit tests for Superset"""
import json
+from datetime import datetime
from io import BytesIO
from typing import Optional
from zipfile import is_zipfile, ZipFile
@@ -24,6 +25,7 @@
import yaml
import pytest
import prison
+from freezegun import freeze_time
from sqlalchemy.sql import func, and_
import tests.integration_tests.test_app
@@ -507,14 +509,17 @@ def test_get_saved_query(self):
db.session.query(SavedQuery).filter(SavedQuery.label == "label1").all()[0]
)
self.login(username="admin")
- uri = f"api/v1/saved_query/{saved_query.id}"
- rv = self.get_assert_metric(uri, "get")
- assert rv.status_code == 200
+ with freeze_time(datetime.now()):
+ uri = f"api/v1/saved_query/{saved_query.id}"
+ rv = self.get_assert_metric(uri, "get")
+ assert rv.status_code == 200
expected_result = {
"id": saved_query.id,
"database": {"id": saved_query.database.id, "database_name": "examples"},
"description": "cool description",
+ "changed_by": None,
+ "changed_on_delta_humanized": "now",
"created_by": {
"first_name": saved_query.created_by.first_name,
"id": saved_query.created_by.id,
@@ -527,9 +532,8 @@ def test_get_saved_query(self):
"template_parameters": None,
}
data = json.loads(rv.data.decode("utf-8"))
- self.assertIn("changed_on_delta_humanized", data["result"])
for key, value in data["result"].items():
- if key not in ("changed_on_delta_humanized",):
+ if key != "changed_on":
assert value == expected_result[key]
def test_get_saved_query_not_found(self):
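Freezing the clock is what lets this test assert on `changed_on_delta_humanized` instead of excluding it: with time pinned for the duration of the request, the delta between `changed_on` and "now" cannot drift mid-test, so the humanized value stays stable at "now" for a row the fixture just wrote. A condensed sketch, reusing this file's fixture and helper names:

    from datetime import datetime
    from freezegun import freeze_time

    with freeze_time(datetime.now()):
        rv = self.get_assert_metric(f"api/v1/saved_query/{saved_query.id}", "get")
        assert rv.status_code == 200
    data = json.loads(rv.data.decode("utf-8"))
    # Deterministic under the frozen clock; previously this key was skipped.
    assert data["result"]["changed_on_delta_humanized"] == "now"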
diff --git a/tests/integration_tests/queries/saved_queries/commands_tests.py b/tests/integration_tests/queries/saved_queries/commands_tests.py
index 5c7b86220981c..cccc40998583b 100644
--- a/tests/integration_tests/queries/saved_queries/commands_tests.py
+++ b/tests/integration_tests/queries/saved_queries/commands_tests.py
@@ -23,13 +23,11 @@
from superset import db, security_manager
from superset.commands.exceptions import CommandInvalidError
from superset.commands.importers.exceptions import IncorrectVersionError
+from superset.commands.query.exceptions import SavedQueryNotFoundError
+from superset.commands.query.export import ExportSavedQueriesCommand
+from superset.commands.query.importers.v1 import ImportSavedQueriesCommand
from superset.models.core import Database
from superset.models.sql_lab import SavedQuery
-from superset.queries.saved_queries.commands.exceptions import SavedQueryNotFoundError
-from superset.queries.saved_queries.commands.export import ExportSavedQueriesCommand
-from superset.queries.saved_queries.commands.importers.v1 import (
- ImportSavedQueriesCommand,
-)
from superset.utils.database import get_example_database
from tests.integration_tests.base_tests import SupersetTestCase
from tests.integration_tests.fixtures.importexport import (
diff --git a/tests/integration_tests/reports/alert_tests.py b/tests/integration_tests/reports/alert_tests.py
index 76890a19e227b..6664d65a9bfd6 100644
--- a/tests/integration_tests/reports/alert_tests.py
+++ b/tests/integration_tests/reports/alert_tests.py
@@ -22,7 +22,7 @@
import pytest
from pytest_mock import MockFixture
-from superset.reports.commands.exceptions import AlertQueryError
+from superset.commands.report.exceptions import AlertQueryError
from superset.reports.models import ReportCreationMethod, ReportScheduleType
from superset.tasks.types import ExecutorType
from superset.utils.database import get_example_database
@@ -64,7 +64,7 @@ def test_execute_query_as_report_executor(
app_context: None,
get_user,
) -> None:
- from superset.reports.commands.alert import AlertCommand
+ from superset.commands.report.alert import AlertCommand
from superset.reports.models import ReportSchedule
with app.app_context():
@@ -86,7 +86,7 @@ def test_execute_query_as_report_executor(
)
command = AlertCommand(report_schedule=report_schedule)
override_user_mock = mocker.patch(
- "superset.reports.commands.alert.override_user"
+ "superset.commands.report.alert.override_user"
)
cm = (
pytest.raises(type(expected_result))
@@ -103,10 +103,10 @@ def test_execute_query_as_report_executor(
def test_execute_query_succeeded_no_retry(
mocker: MockFixture, app_context: None
) -> None:
- from superset.reports.commands.alert import AlertCommand
+ from superset.commands.report.alert import AlertCommand
execute_query_mock = mocker.patch(
- "superset.reports.commands.alert.AlertCommand._execute_query",
+ "superset.commands.report.alert.AlertCommand._execute_query",
side_effect=lambda: pd.DataFrame([{"sample_col": 0}]),
)
@@ -120,10 +120,10 @@ def test_execute_query_succeeded_no_retry(
def test_execute_query_succeeded_with_retries(
mocker: MockFixture, app_context: None
) -> None:
- from superset.reports.commands.alert import AlertCommand, AlertQueryError
+ from superset.commands.report.alert import AlertCommand, AlertQueryError
execute_query_mock = mocker.patch(
- "superset.reports.commands.alert.AlertCommand._execute_query"
+ "superset.commands.report.alert.AlertCommand._execute_query"
)
query_executed_count = 0
@@ -150,10 +150,10 @@ def _mocked_execute_query() -> pd.DataFrame:
def test_execute_query_failed_no_retry(mocker: MockFixture, app_context: None) -> None:
- from superset.reports.commands.alert import AlertCommand, AlertQueryTimeout
+ from superset.commands.report.alert import AlertCommand, AlertQueryTimeout
execute_query_mock = mocker.patch(
- "superset.reports.commands.alert.AlertCommand._execute_query"
+ "superset.commands.report.alert.AlertCommand._execute_query"
)
def _mocked_execute_query() -> None:
@@ -172,10 +172,10 @@ def _mocked_execute_query() -> None:
def test_execute_query_failed_max_retries(
mocker: MockFixture, app_context: None
) -> None:
- from superset.reports.commands.alert import AlertCommand, AlertQueryError
+ from superset.commands.report.alert import AlertCommand, AlertQueryError
execute_query_mock = mocker.patch(
- "superset.reports.commands.alert.AlertCommand._execute_query"
+ "superset.commands.report.alert.AlertCommand._execute_query"
)
def _mocked_execute_query() -> None:
diff --git a/tests/integration_tests/reports/commands/create_dashboard_report_tests.py b/tests/integration_tests/reports/commands/create_dashboard_report_tests.py
index 81945c18a9abd..a7f3001aa80c0 100644
--- a/tests/integration_tests/reports/commands/create_dashboard_report_tests.py
+++ b/tests/integration_tests/reports/commands/create_dashboard_report_tests.py
@@ -18,9 +18,9 @@
import pytest
from superset import db
+from superset.commands.report.create import CreateReportScheduleCommand
+from superset.commands.report.exceptions import ReportScheduleInvalidError
from superset.models.dashboard import Dashboard
-from superset.reports.commands.create import CreateReportScheduleCommand
-from superset.reports.commands.exceptions import ReportScheduleInvalidError
from superset.reports.models import (
ReportCreationMethod,
ReportRecipientType,
diff --git a/tests/integration_tests/reports/commands/execute_dashboard_report_tests.py b/tests/integration_tests/reports/commands/execute_dashboard_report_tests.py
index fe20365765339..68150a9c3c28b 100644
--- a/tests/integration_tests/reports/commands/execute_dashboard_report_tests.py
+++ b/tests/integration_tests/reports/commands/execute_dashboard_report_tests.py
@@ -20,11 +20,9 @@
from flask import current_app
-from superset.dashboards.permalink.commands.create import (
- CreateDashboardPermalinkCommand,
-)
+from superset.commands.dashboard.permalink.create import CreateDashboardPermalinkCommand
+from superset.commands.report.execute import AsyncExecuteReportScheduleCommand
from superset.models.dashboard import Dashboard
-from superset.reports.commands.execute import AsyncExecuteReportScheduleCommand
from superset.reports.models import ReportSourceFormat
from tests.integration_tests.fixtures.tabbed_dashboard import tabbed_dashboard
from tests.integration_tests.reports.utils import create_dashboard_report
@@ -32,10 +30,10 @@
@patch("superset.reports.notifications.email.send_email_smtp")
@patch(
- "superset.reports.commands.execute.DashboardScreenshot",
+ "superset.commands.report.execute.DashboardScreenshot",
)
@patch(
- "superset.dashboards.permalink.commands.create.CreateDashboardPermalinkCommand.run"
+ "superset.commands.dashboard.permalink.create.CreateDashboardPermalinkCommand.run"
)
def test_report_for_dashboard_with_tabs(
create_dashboard_permalink_mock: MagicMock,
@@ -70,10 +68,10 @@ def test_report_for_dashboard_with_tabs(
@patch("superset.reports.notifications.email.send_email_smtp")
@patch(
- "superset.reports.commands.execute.DashboardScreenshot",
+ "superset.commands.report.execute.DashboardScreenshot",
)
@patch(
- "superset.dashboards.permalink.commands.create.CreateDashboardPermalinkCommand.run"
+ "superset.commands.dashboard.permalink.create.CreateDashboardPermalinkCommand.run"
)
def test_report_with_header_data(
create_dashboard_permalink_mock: MagicMock,
diff --git a/tests/integration_tests/reports/commands_tests.py b/tests/integration_tests/reports/commands_tests.py
index 120559f8fd686..939c9c0cfa9f8 100644
--- a/tests/integration_tests/reports/commands_tests.py
+++ b/tests/integration_tests/reports/commands_tests.py
@@ -39,11 +39,7 @@
from sqlalchemy.sql import func
from superset import db
-from superset.exceptions import SupersetException
-from superset.models.core import Database
-from superset.models.dashboard import Dashboard
-from superset.models.slice import Slice
-from superset.reports.commands.exceptions import (
+from superset.commands.report.exceptions import (
AlertQueryError,
AlertQueryInvalidTypeError,
AlertQueryMultipleColumnsError,
@@ -58,11 +54,15 @@
ReportScheduleSystemErrorsException,
ReportScheduleWorkingTimeoutError,
)
-from superset.reports.commands.execute import (
+from superset.commands.report.execute import (
AsyncExecuteReportScheduleCommand,
BaseReportState,
)
-from superset.reports.commands.log_prune import AsyncPruneReportScheduleLogCommand
+from superset.commands.report.log_prune import AsyncPruneReportScheduleLogCommand
+from superset.exceptions import SupersetException
+from superset.models.core import Database
+from superset.models.dashboard import Dashboard
+from superset.models.slice import Slice
from superset.reports.models import (
ReportDataFormat,
ReportExecutionLog,
@@ -1607,7 +1607,7 @@ def test_soft_timeout_alert(email_mock, create_alert_email_chart):
"""
from celery.exceptions import SoftTimeLimitExceeded
- from superset.reports.commands.exceptions import AlertQueryTimeout
+ from superset.commands.report.exceptions import AlertQueryTimeout
with patch.object(
create_alert_email_chart.database.db_engine_spec, "execute", return_value=None
@@ -1748,7 +1748,7 @@ def test_fail_screenshot(screenshot_mock, email_mock, create_report_email_chart)
"""
from celery.exceptions import SoftTimeLimitExceeded
- from superset.reports.commands.exceptions import AlertQueryTimeout
+ from superset.commands.report.exceptions import AlertQueryTimeout
screenshot_mock.side_effect = Exception("Unexpected error")
with pytest.raises(ReportScheduleScreenshotFailedError):
@@ -1919,7 +1919,6 @@ def test_grace_period_error_flap(
# Change report_schedule to valid
create_invalid_sql_alert_email_chart.sql = "SELECT 1 AS metric"
create_invalid_sql_alert_email_chart.grace_period = 0
- db.session.merge(create_invalid_sql_alert_email_chart)
db.session.commit()
with freeze_time("2020-01-01T00:31:00Z"):
@@ -1936,7 +1935,6 @@ def test_grace_period_error_flap(
create_invalid_sql_alert_email_chart.sql = "SELECT 'first'"
create_invalid_sql_alert_email_chart.grace_period = 10
- db.session.merge(create_invalid_sql_alert_email_chart)
db.session.commit()
# assert that after a success, when back to error we send the error notification
@@ -1965,8 +1963,8 @@ def test_prune_log_soft_time_out(bulk_delete_logs, create_report_email_dashboard
assert str(excinfo.value) == "SoftTimeLimitExceeded()"
-@patch("superset.reports.commands.execute.logger")
-@patch("superset.reports.commands.execute.create_notification")
+@patch("superset.commands.report.execute.logger")
+@patch("superset.commands.report.execute.create_notification")
def test__send_with_client_errors(notification_mock, logger_mock):
notification_content = "I am some content"
recipients = ["test@foo.com"]
@@ -1980,8 +1978,8 @@ def test__send_with_client_errors(notification_mock, logger_mock):
)
-@patch("superset.reports.commands.execute.logger")
-@patch("superset.reports.commands.execute.create_notification")
+@patch("superset.commands.report.execute.logger")
+@patch("superset.commands.report.execute.create_notification")
def test__send_with_multiple_errors(notification_mock, logger_mock):
notification_content = "I am some content"
recipients = ["test@foo.com", "test2@bar.com"]
@@ -2007,8 +2005,8 @@ def test__send_with_multiple_errors(notification_mock, logger_mock):
)
-@patch("superset.reports.commands.execute.logger")
-@patch("superset.reports.commands.execute.create_notification")
+@patch("superset.commands.report.execute.logger")
+@patch("superset.commands.report.execute.create_notification")
def test__send_with_server_errors(notification_mock, logger_mock):
notification_content = "I am some content"
recipients = ["test@foo.com"]
diff --git a/tests/integration_tests/reports/scheduler_tests.py b/tests/integration_tests/reports/scheduler_tests.py
index 29dd58273a2c2..ee93ef48a4032 100644
--- a/tests/integration_tests/reports/scheduler_tests.py
+++ b/tests/integration_tests/reports/scheduler_tests.py
@@ -154,11 +154,11 @@ def test_scheduler_feature_flag_off(execute_mock, is_feature_enabled, owners):
@pytest.mark.usefixtures("owners")
-@patch("superset.reports.commands.execute.AsyncExecuteReportScheduleCommand.__init__")
-@patch("superset.reports.commands.execute.AsyncExecuteReportScheduleCommand.run")
+@patch("superset.commands.report.execute.AsyncExecuteReportScheduleCommand.__init__")
+@patch("superset.commands.report.execute.AsyncExecuteReportScheduleCommand.run")
@patch("superset.tasks.scheduler.execute.update_state")
def test_execute_task(update_state_mock, command_mock, init_mock, owners):
- from superset.reports.commands.exceptions import ReportScheduleUnexpectedError
+ from superset.commands.report.exceptions import ReportScheduleUnexpectedError
with app.app_context():
report_schedule = insert_report_schedule(
@@ -179,8 +179,8 @@ def test_execute_task(update_state_mock, command_mock, init_mock, owners):
@pytest.mark.usefixtures("owners")
-@patch("superset.reports.commands.execute.AsyncExecuteReportScheduleCommand.__init__")
-@patch("superset.reports.commands.execute.AsyncExecuteReportScheduleCommand.run")
+@patch("superset.commands.report.execute.AsyncExecuteReportScheduleCommand.__init__")
+@patch("superset.commands.report.execute.AsyncExecuteReportScheduleCommand.run")
@patch("superset.tasks.scheduler.execute.update_state")
@patch("superset.utils.log.logger")
def test_execute_task_with_command_exception(
diff --git a/tests/integration_tests/result_set_tests.py b/tests/integration_tests/result_set_tests.py
index a39e0ac0d45cf..3e2b3656c212c 100644
--- a/tests/integration_tests/result_set_tests.py
+++ b/tests/integration_tests/result_set_tests.py
@@ -21,6 +21,7 @@
from superset.dataframe import df_to_records
from superset.db_engine_specs import BaseEngineSpec
from superset.result_set import dedup, SupersetResultSet
+from superset.utils.core import GenericDataType
from .base_tests import SupersetTestCase
@@ -48,9 +49,27 @@ def test_get_columns_basic(self):
self.assertEqual(
results.columns,
[
- {"is_dttm": False, "type": "STRING", "column_name": "a", "name": "a"},
- {"is_dttm": False, "type": "STRING", "column_name": "b", "name": "b"},
- {"is_dttm": False, "type": "STRING", "column_name": "c", "name": "c"},
+ {
+ "is_dttm": False,
+ "type": "STRING",
+ "type_generic": GenericDataType.STRING,
+ "column_name": "a",
+ "name": "a",
+ },
+ {
+ "is_dttm": False,
+ "type": "STRING",
+ "type_generic": GenericDataType.STRING,
+ "column_name": "b",
+ "name": "b",
+ },
+ {
+ "is_dttm": False,
+ "type": "STRING",
+ "type_generic": GenericDataType.STRING,
+ "column_name": "c",
+ "name": "c",
+ },
],
)
@@ -61,8 +80,20 @@ def test_get_columns_with_int(self):
self.assertEqual(
results.columns,
[
- {"is_dttm": False, "type": "STRING", "column_name": "a", "name": "a"},
- {"is_dttm": False, "type": "INT", "column_name": "b", "name": "b"},
+ {
+ "is_dttm": False,
+ "type": "STRING",
+ "type_generic": GenericDataType.STRING,
+ "column_name": "a",
+ "name": "a",
+ },
+ {
+ "is_dttm": False,
+ "type": "INT",
+ "type_generic": GenericDataType.NUMERIC,
+ "column_name": "b",
+ "name": "b",
+ },
],
)
@@ -76,11 +107,41 @@ def test_get_columns_type_inference(self):
self.assertEqual(
results.columns,
[
- {"is_dttm": False, "type": "FLOAT", "column_name": "a", "name": "a"},
- {"is_dttm": False, "type": "INT", "column_name": "b", "name": "b"},
- {"is_dttm": False, "type": "STRING", "column_name": "c", "name": "c"},
- {"is_dttm": True, "type": "DATETIME", "column_name": "d", "name": "d"},
- {"is_dttm": False, "type": "BOOL", "column_name": "e", "name": "e"},
+ {
+ "is_dttm": False,
+ "type": "FLOAT",
+ "type_generic": GenericDataType.NUMERIC,
+ "column_name": "a",
+ "name": "a",
+ },
+ {
+ "is_dttm": False,
+ "type": "INT",
+ "type_generic": GenericDataType.NUMERIC,
+ "column_name": "b",
+ "name": "b",
+ },
+ {
+ "is_dttm": False,
+ "type": "STRING",
+ "type_generic": GenericDataType.STRING,
+ "column_name": "c",
+ "name": "c",
+ },
+ {
+ "is_dttm": True,
+ "type": "DATETIME",
+ "type_generic": GenericDataType.TEMPORAL,
+ "column_name": "d",
+ "name": "d",
+ },
+ {
+ "is_dttm": False,
+ "type": "BOOL",
+ "type_generic": GenericDataType.BOOLEAN,
+ "column_name": "e",
+ "name": "e",
+ },
],
)
@@ -108,6 +169,7 @@ def test_int64_with_missing_data(self):
cursor_descr = [("user_id", "bigint", None, None, None, None, True)]
results = SupersetResultSet(data, cursor_descr, BaseEngineSpec)
self.assertEqual(results.columns[0]["type"], "BIGINT")
+ self.assertEqual(results.columns[0]["type_generic"], GenericDataType.NUMERIC)
def test_data_as_list_of_lists(self):
data = [[1, "a"], [2, "b"]]
@@ -127,6 +189,7 @@ def test_nullable_bool(self):
cursor_descr = [("is_test", "bool", None, None, None, None, True)]
results = SupersetResultSet(data, cursor_descr, BaseEngineSpec)
self.assertEqual(results.columns[0]["type"], "BOOL")
+ self.assertEqual(results.columns[0]["type_generic"], GenericDataType.BOOLEAN)
df = results.to_pandas_df()
self.assertEqual(
df_to_records(df),
@@ -158,9 +221,13 @@ def test_nested_types(self):
cursor_descr = [("id",), ("dict_arr",), ("num_arr",), ("map_col",)]
results = SupersetResultSet(data, cursor_descr, BaseEngineSpec)
self.assertEqual(results.columns[0]["type"], "INT")
+ self.assertEqual(results.columns[0]["type_generic"], GenericDataType.NUMERIC)
self.assertEqual(results.columns[1]["type"], "STRING")
+ self.assertEqual(results.columns[1]["type_generic"], GenericDataType.STRING)
self.assertEqual(results.columns[2]["type"], "STRING")
+ self.assertEqual(results.columns[2]["type_generic"], GenericDataType.STRING)
self.assertEqual(results.columns[3]["type"], "STRING")
+ self.assertEqual(results.columns[3]["type_generic"], GenericDataType.STRING)
df = results.to_pandas_df()
self.assertEqual(
df_to_records(df),
@@ -204,6 +271,7 @@ def test_single_column_multidim_nested_types(self):
cursor_descr = [("metadata",)]
results = SupersetResultSet(data, cursor_descr, BaseEngineSpec)
self.assertEqual(results.columns[0]["type"], "STRING")
+ self.assertEqual(results.columns[0]["type_generic"], GenericDataType.STRING)
df = results.to_pandas_df()
self.assertEqual(
df_to_records(df),
@@ -219,6 +287,7 @@ def test_nested_list_types(self):
cursor_descr = [("metadata",)]
results = SupersetResultSet(data, cursor_descr, BaseEngineSpec)
self.assertEqual(results.columns[0]["type"], "STRING")
+ self.assertEqual(results.columns[0]["type_generic"], GenericDataType.STRING)
df = results.to_pandas_df()
self.assertEqual(
df_to_records(df), [{"metadata": '[{"TestKey": [123456, "foo"]}]'}]
@@ -229,6 +298,7 @@ def test_empty_datetime(self):
cursor_descr = [("ds", "timestamp", None, None, None, None, True)]
results = SupersetResultSet(data, cursor_descr, BaseEngineSpec)
self.assertEqual(results.columns[0]["type"], "TIMESTAMP")
+ self.assertEqual(results.columns[0]["type_generic"], GenericDataType.TEMPORAL)
def test_no_type_coercion(self):
data = [("a", 1), ("b", 2)]
@@ -238,7 +308,9 @@ def test_no_type_coercion(self):
]
results = SupersetResultSet(data, cursor_descr, BaseEngineSpec)
self.assertEqual(results.columns[0]["type"], "VARCHAR")
+ self.assertEqual(results.columns[0]["type_generic"], GenericDataType.STRING)
self.assertEqual(results.columns[1]["type"], "INT")
+ self.assertEqual(results.columns[1]["type_generic"], GenericDataType.NUMERIC)
def test_empty_data(self):
data = []
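The new `type_generic` assertions pin down the coarse bucket that `SupersetResultSet` now infers alongside the native type string. Summarizing the mapping these tests exercise (enum values from `superset.utils.core.GenericDataType`; `results` as built in the tests above):

    from superset.utils.core import GenericDataType

    # Native type -> generic bucket, as asserted above:
    #   VARCHAR / STRING       -> GenericDataType.STRING
    #   INT / BIGINT / FLOAT   -> GenericDataType.NUMERIC
    #   DATETIME / TIMESTAMP   -> GenericDataType.TEMPORAL
    #   BOOL                   -> GenericDataType.BOOLEAN
    col = results.columns[0]
    assert {"column_name", "name", "type", "type_generic", "is_dttm"} <= col.keys()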
diff --git a/tests/integration_tests/security/migrate_roles_tests.py b/tests/integration_tests/security/migrate_roles_tests.py
index ae89fea068661..39d66a82aa671 100644
--- a/tests/integration_tests/security/migrate_roles_tests.py
+++ b/tests/integration_tests/security/migrate_roles_tests.py
@@ -62,7 +62,6 @@ def create_old_role(pvm_map: PvmMigrationMapType, external_pvms):
db.session.query(Role).filter(Role.name == "Dummy Role").one_or_none()
)
new_role.permissions = []
- db.session.merge(new_role)
for old_pvm, new_pvms in pvm_map.items():
security_manager.del_permission_view_menu(old_pvm.permission, old_pvm.view)
for new_pvm in new_pvms:
diff --git a/tests/integration_tests/sql_lab/api_tests.py b/tests/integration_tests/sql_lab/api_tests.py
index 49dd4ea32e7f4..da050c2363d2a 100644
--- a/tests/integration_tests/sql_lab/api_tests.py
+++ b/tests/integration_tests/sql_lab/api_tests.py
@@ -209,7 +209,7 @@ def test_estimate_valid_request(self):
return_value=formatter_response
)
- with mock.patch("superset.sqllab.commands.estimate.db") as mock_superset_db:
+ with mock.patch("superset.commands.sql_lab.estimate.db") as mock_superset_db:
mock_superset_db.session.query().get.return_value = db_mock
data = {"database_id": 1, "sql": "SELECT 1"}
@@ -236,7 +236,7 @@ def test_format_sql_request(self):
self.assertDictEqual(resp_data, success_resp)
self.assertEqual(rv.status_code, 200)
- @mock.patch("superset.sqllab.commands.results.results_backend_use_msgpack", False)
+ @mock.patch("superset.commands.sql_lab.results.results_backend_use_msgpack", False)
def test_execute_required_params(self):
self.login()
client_id = f"{random.getrandbits(64)}"[:10]
@@ -276,7 +276,7 @@ def test_execute_required_params(self):
self.assertDictEqual(resp_data, failed_resp)
self.assertEqual(rv.status_code, 400)
- @mock.patch("superset.sqllab.commands.results.results_backend_use_msgpack", False)
+ @mock.patch("superset.commands.sql_lab.results.results_backend_use_msgpack", False)
def test_execute_valid_request(self) -> None:
from superset import sql_lab as core
@@ -320,9 +320,9 @@ def test_execute_custom_templated(self, sql_lab_mock, mock_dt) -> None:
self.delete_fake_db_for_macros()
- @mock.patch("superset.sqllab.commands.results.results_backend_use_msgpack", False)
+ @mock.patch("superset.commands.sql_lab.results.results_backend_use_msgpack", False)
def test_get_results_with_display_limit(self):
- from superset.sqllab.commands import results as command
+ from superset.commands.sql_lab import results as command
command.results_backend = mock.Mock()
self.login()
@@ -355,7 +355,7 @@ def test_get_results_with_display_limit(self):
compressed = utils.zlib_compress(serialized_payload)
command.results_backend.get.return_value = compressed
- with mock.patch("superset.sqllab.commands.results.db") as mock_superset_db:
+ with mock.patch("superset.commands.sql_lab.results.db") as mock_superset_db:
mock_superset_db.session.query().filter_by().one_or_none.return_value = (
query_mock
)
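
Note: every mock.patch string in this file changes because unittest.mock patches a name where it is looked up, not where it is defined; relocating the SQL Lab command modules under superset.commands.sql_lab invalidates the old superset.sqllab.commands.* targets. A minimal sketch of the pattern (module path taken from the diff; the payload value is hypothetical):

from unittest import mock

with mock.patch("superset.commands.sql_lab.results.results_backend") as backend:
    backend.get.return_value = b"cached-bytes"  # hypothetical payload
    # code that reads results.results_backend inside that module sees the mock
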
diff --git a/tests/integration_tests/sql_lab/commands_tests.py b/tests/integration_tests/sql_lab/commands_tests.py
index d76924a8fb1cc..11eb5de0c9092 100644
--- a/tests/integration_tests/sql_lab/commands_tests.py
+++ b/tests/integration_tests/sql_lab/commands_tests.py
@@ -22,6 +22,7 @@
from flask_babel import gettext as __
from superset import app, db, sql_lab
+from superset.commands.sql_lab import estimate, export, results
from superset.common.db_query_status import QueryStatus
from superset.errors import ErrorLevel, SupersetError, SupersetErrorType
from superset.exceptions import (
@@ -32,7 +33,6 @@
)
from superset.models.core import Database
from superset.models.sql_lab import Query
-from superset.sqllab.commands import estimate, export, results
from superset.sqllab.limiting_factor import LimitingFactor
from superset.sqllab.schemas import EstimateQueryCostSchema
from superset.utils import core as utils
@@ -47,7 +47,7 @@ def test_validation_no_database(self) -> None:
data: EstimateQueryCostSchema = schema.dump(params)
command = estimate.QueryEstimationCommand(data)
- with mock.patch("superset.sqllab.commands.estimate.db") as mock_superset_db:
+ with mock.patch("superset.commands.sql_lab.estimate.db") as mock_superset_db:
mock_superset_db.session.query().get.return_value = None
with pytest.raises(SupersetErrorException) as ex_info:
command.validate()
@@ -79,7 +79,7 @@ def test_run_timeout(self, is_feature_enabled) -> None:
db_mock.db_engine_spec.query_cost_formatter = mock.Mock(return_value=None)
is_feature_enabled.return_value = False
- with mock.patch("superset.sqllab.commands.estimate.db") as mock_superset_db:
+ with mock.patch("superset.commands.sql_lab.estimate.db") as mock_superset_db:
mock_superset_db.session.query().get.return_value = db_mock
with pytest.raises(SupersetErrorException) as ex_info:
command.run()
@@ -105,7 +105,7 @@ def test_run_success(self) -> None:
db_mock.db_engine_spec.estimate_query_cost = mock.Mock(return_value=100)
db_mock.db_engine_spec.query_cost_formatter = mock.Mock(return_value=payload)
- with mock.patch("superset.sqllab.commands.estimate.db") as mock_superset_db:
+ with mock.patch("superset.commands.sql_lab.estimate.db") as mock_superset_db:
mock_superset_db.session.query().get.return_value = db_mock
result = command.run()
assert result == payload
@@ -223,7 +223,7 @@ def test_run_no_results_backend_executed_sql_limiting_factor(
@pytest.mark.usefixtures("create_database_and_query")
@patch("superset.models.sql_lab.Query.raise_for_access", lambda _: None)
- @patch("superset.sqllab.commands.export.results_backend_use_msgpack", False)
+ @patch("superset.commands.sql_lab.export.results_backend_use_msgpack", False)
def test_run_with_results_backend(self) -> None:
command = export.SqlResultExportCommand("test")
@@ -273,8 +273,8 @@ def create_database_and_query(self):
db.session.delete(query_obj)
db.session.commit()
- @patch("superset.sqllab.commands.results.results_backend_use_msgpack", False)
- @patch("superset.sqllab.commands.results.results_backend", None)
+ @patch("superset.commands.sql_lab.results.results_backend_use_msgpack", False)
+ @patch("superset.commands.sql_lab.results.results_backend", None)
def test_validation_no_results_backend(self) -> None:
command = results.SqlExecutionResultsCommand("test", 1000)
@@ -285,7 +285,7 @@ def test_validation_no_results_backend(self) -> None:
== SupersetErrorType.RESULTS_BACKEND_NOT_CONFIGURED_ERROR
)
- @patch("superset.sqllab.commands.results.results_backend_use_msgpack", False)
+ @patch("superset.commands.sql_lab.results.results_backend_use_msgpack", False)
def test_validation_data_cannot_be_retrieved(self) -> None:
results.results_backend = mock.Mock()
results.results_backend.get.return_value = None
@@ -296,7 +296,7 @@ def test_validation_data_cannot_be_retrieved(self) -> None:
command.run()
assert ex_info.value.error.error_type == SupersetErrorType.RESULTS_BACKEND_ERROR
- @patch("superset.sqllab.commands.results.results_backend_use_msgpack", False)
+ @patch("superset.commands.sql_lab.results.results_backend_use_msgpack", False)
def test_validation_data_not_found(self) -> None:
data = [{"col_0": i} for i in range(100)]
payload = {
@@ -317,7 +317,7 @@ def test_validation_data_not_found(self) -> None:
assert ex_info.value.error.error_type == SupersetErrorType.RESULTS_BACKEND_ERROR
@pytest.mark.usefixtures("create_database_and_query")
- @patch("superset.sqllab.commands.results.results_backend_use_msgpack", False)
+ @patch("superset.commands.sql_lab.results.results_backend_use_msgpack", False)
def test_validation_query_not_found(self) -> None:
data = [{"col_0": i} for i in range(104)]
payload = {
@@ -344,7 +344,7 @@ def test_validation_query_not_found(self) -> None:
)
@pytest.mark.usefixtures("create_database_and_query")
- @patch("superset.sqllab.commands.results.results_backend_use_msgpack", False)
+ @patch("superset.commands.sql_lab.results.results_backend_use_msgpack", False)
def test_run_succeeds(self) -> None:
data = [{"col_0": i} for i in range(104)]
payload = {
diff --git a/tests/integration_tests/strategy_tests.py b/tests/integration_tests/strategy_tests.py
index 6fec16ca7475b..8a7477a8fc58c 100644
--- a/tests/integration_tests/strategy_tests.py
+++ b/tests/integration_tests/strategy_tests.py
@@ -33,7 +33,7 @@
from superset import db
from superset.models.core import Log
-from superset.tags.models import get_tag, ObjectTypes, TaggedObject, TagTypes
+from superset.tags.models import get_tag, ObjectType, TaggedObject, TagType
from superset.tasks.cache import (
DashboardTagsStrategy,
TopNDashboardsStrategy,
@@ -93,7 +93,7 @@ def reset_tag(self, tag):
"load_unicode_dashboard_with_slice", "load_birth_names_dashboard_with_slices"
)
def test_dashboard_tags_strategy(self):
- tag1 = get_tag("tag1", db.session, TagTypes.custom)
+ tag1 = get_tag("tag1", db.session, TagType.custom)
# delete first to make test idempotent
self.reset_tag(tag1)
@@ -103,11 +103,11 @@ def test_dashboard_tags_strategy(self):
self.assertEqual(result, expected)
# tag dashboard 'births' with `tag1`
- tag1 = get_tag("tag1", db.session, TagTypes.custom)
+ tag1 = get_tag("tag1", db.session, TagType.custom)
dash = self.get_dash_by_slug("births")
tag1_urls = [{"chart_id": chart.id} for chart in dash.slices]
tagged_object = TaggedObject(
- tag_id=tag1.id, object_id=dash.id, object_type=ObjectTypes.dashboard
+ tag_id=tag1.id, object_id=dash.id, object_type=ObjectType.dashboard
)
db.session.add(tagged_object)
db.session.commit()
@@ -115,7 +115,7 @@ def test_dashboard_tags_strategy(self):
self.assertCountEqual(strategy.get_payloads(), tag1_urls)
strategy = DashboardTagsStrategy(["tag2"])
- tag2 = get_tag("tag2", db.session, TagTypes.custom)
+ tag2 = get_tag("tag2", db.session, TagType.custom)
self.reset_tag(tag2)
result = strategy.get_payloads()
@@ -128,7 +128,7 @@ def test_dashboard_tags_strategy(self):
tag2_urls = [{"chart_id": chart.id}]
object_id = chart.id
tagged_object = TaggedObject(
- tag_id=tag2.id, object_id=object_id, object_type=ObjectTypes.chart
+ tag_id=tag2.id, object_id=object_id, object_type=ObjectType.chart
)
db.session.add(tagged_object)
db.session.commit()
diff --git a/tests/integration_tests/tagging_tests.py b/tests/integration_tests/tagging_tests.py
index 4ecfd1049f31e..36fb8df3ff647 100644
--- a/tests/integration_tests/tagging_tests.py
+++ b/tests/integration_tests/tagging_tests.py
@@ -70,7 +70,7 @@ def test_dataset_tagging(self):
# Test to make sure that a dataset tag was added to the tagged_object table
tags = self.query_tagged_object_table()
self.assertEqual(1, len(tags))
- self.assertEqual("ObjectTypes.dataset", str(tags[0].object_type))
+ self.assertEqual("ObjectType.dataset", str(tags[0].object_type))
self.assertEqual(test_dataset.id, tags[0].object_id)
# Cleanup the db
@@ -108,7 +108,7 @@ def test_chart_tagging(self):
# Test to make sure that a chart tag was added to the tagged_object table
tags = self.query_tagged_object_table()
self.assertEqual(1, len(tags))
- self.assertEqual("ObjectTypes.chart", str(tags[0].object_type))
+ self.assertEqual("ObjectType.chart", str(tags[0].object_type))
self.assertEqual(test_chart.id, tags[0].object_id)
# Cleanup the db
@@ -144,7 +144,7 @@ def test_dashboard_tagging(self):
# Test to make sure that a dashboard tag was added to the tagged_object table
tags = self.query_tagged_object_table()
self.assertEqual(1, len(tags))
- self.assertEqual("ObjectTypes.dashboard", str(tags[0].object_type))
+ self.assertEqual("ObjectType.dashboard", str(tags[0].object_type))
self.assertEqual(test_dashboard.id, tags[0].object_id)
# Cleanup the db
@@ -178,14 +178,14 @@ def test_saved_query_tagging(self):
self.assertEqual(2, len(tags))
- self.assertEqual("ObjectTypes.query", str(tags[0].object_type))
+ self.assertEqual("ObjectType.query", str(tags[0].object_type))
self.assertEqual("owner:None", str(tags[0].tag.name))
- self.assertEqual("TagTypes.owner", str(tags[0].tag.type))
+ self.assertEqual("TagType.owner", str(tags[0].tag.type))
self.assertEqual(test_saved_query.id, tags[0].object_id)
- self.assertEqual("ObjectTypes.query", str(tags[1].object_type))
+ self.assertEqual("ObjectType.query", str(tags[1].object_type))
self.assertEqual("type:query", str(tags[1].tag.name))
- self.assertEqual("TagTypes.type", str(tags[1].tag.type))
+ self.assertEqual("TagType.type", str(tags[1].tag.type))
self.assertEqual(test_saved_query.id, tags[1].object_id)
# Cleanup the db
@@ -217,7 +217,7 @@ def test_favorite_tagging(self):
# Test to make sure that a favorited object tag was added to the tagged_object table
tags = self.query_tagged_object_table()
self.assertEqual(1, len(tags))
- self.assertEqual("ObjectTypes.chart", str(tags[0].object_type))
+ self.assertEqual("ObjectType.chart", str(tags[0].object_type))
self.assertEqual(test_saved_query.obj_id, tags[0].object_id)
# Cleanup the db
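
Note: the expected strings change because Python renders an enum member as "<ClassName>.<member>", so renaming ObjectTypes/TagTypes to the singular ObjectType/TagType also renames every repr these tests compare against. A self-contained sketch (member values assumed for illustration, following superset.tags.models):

from enum import Enum

class ObjectType(Enum):  # renamed from ObjectTypes
    query = 1
    chart = 2
    dashboard = 3
    dataset = 4

assert str(ObjectType.dashboard) == "ObjectType.dashboard"
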
diff --git a/tests/integration_tests/tags/api_tests.py b/tests/integration_tests/tags/api_tests.py
index 33fa4902b26ca..863288a3e73ec 100644
--- a/tests/integration_tests/tags/api_tests.py
+++ b/tests/integration_tests/tags/api_tests.py
@@ -17,10 +17,12 @@
# isort:skip_file
"""Unit tests for Superset"""
import json
+from datetime import datetime
from flask import g
import pytest
import prison
+from freezegun import freeze_time
from sqlalchemy.sql import func
from sqlalchemy import and_
from superset.models.dashboard import Dashboard
@@ -35,7 +37,7 @@
from superset.common.db_query_status import QueryStatus
from superset.models.core import Database
from superset.utils.database import get_example_database, get_main_database
-from superset.tags.models import ObjectTypes, Tag, TagTypes, TaggedObject
+from superset.tags.models import ObjectType, Tag, TagType, TaggedObject
from tests.integration_tests.fixtures.birth_names_dashboard import (
load_birth_names_dashboard_with_slices,
load_birth_names_data,
@@ -47,7 +49,7 @@
from tests.integration_tests.fixtures.tags import with_tagging_system_feature
from tests.integration_tests.base_tests import SupersetTestCase
from superset.daos.tag import TagDAO
-from superset.tags.models import ObjectTypes
+from superset.tags.models import ObjectType
TAGS_FIXTURE_COUNT = 10
@@ -84,7 +86,7 @@ def insert_tagged_object(
self,
tag_id: int,
object_id: int,
- object_type: ObjectTypes,
+ object_type: ObjectType,
) -> TaggedObject:
tag = db.session.query(Tag).filter(Tag.id == tag_id).first()
tagged_object = TaggedObject(
@@ -121,13 +123,14 @@ def test_get_tag(self):
"""
Query API: Test get query
"""
- tag = self.insert_tag(
- name="test get tag",
- tag_type="custom",
- )
- self.login(username="admin")
- uri = f"api/v1/tag/{tag.id}"
- rv = self.client.get(uri)
+ with freeze_time(datetime.now()):
+ tag = self.insert_tag(
+ name="test get tag",
+ tag_type="custom",
+ )
+ self.login(username="admin")
+ uri = f"api/v1/tag/{tag.id}"
+ rv = self.client.get(uri)
self.assertEqual(rv.status_code, 200)
expected_result = {
"changed_by": None,
@@ -135,7 +138,7 @@ def test_get_tag(self):
"created_by": None,
"id": tag.id,
"name": "test get tag",
- "type": TagTypes.custom.value,
+ "type": TagType.custom.value,
}
data = json.loads(rv.data.decode("utf-8"))
for key, value in expected_result.items():
@@ -192,7 +195,7 @@ def test_add_tagged_objects(self):
.first()
)
dashboard_id = dashboard.id
- dashboard_type = ObjectTypes.dashboard.value
+ dashboard_type = ObjectType.dashboard.value
uri = f"api/v1/tag/{dashboard_type}/{dashboard_id}/"
example_tag_names = ["example_tag_1", "example_tag_2"]
data = {"properties": {"tags": example_tag_names}}
@@ -207,7 +210,7 @@ def test_add_tagged_objects(self):
tagged_objects = db.session.query(TaggedObject).filter(
TaggedObject.tag_id.in_(tag_ids),
TaggedObject.object_id == dashboard_id,
- TaggedObject.object_type == ObjectTypes.dashboard,
+ TaggedObject.object_type == ObjectType.dashboard,
)
assert tagged_objects.count() == 2
# clean up tags and tagged objects
@@ -225,7 +228,7 @@ def test_add_tagged_objects(self):
def test_delete_tagged_objects(self):
self.login(username="admin")
dashboard_id = 1
- dashboard_type = ObjectTypes.dashboard
+ dashboard_type = ObjectType.dashboard
tag_names = ["example_tag_1", "example_tag_2"]
tags = db.session.query(Tag).filter(Tag.name.in_(tag_names))
assert tags.count() == 2
@@ -295,7 +298,7 @@ def test_get_objects_by_tag(self):
.first()
)
dashboard_id = dashboard.id
- dashboard_type = ObjectTypes.dashboard
+ dashboard_type = ObjectType.dashboard
tag_names = ["example_tag_1", "example_tag_2"]
tags = db.session.query(Tag).filter(Tag.name.in_(tag_names))
for tag in tags:
@@ -331,7 +334,7 @@ def test_get_all_objects(self):
.first()
)
dashboard_id = dashboard.id
- dashboard_type = ObjectTypes.dashboard
+ dashboard_type = ObjectType.dashboard
tag_names = ["example_tag_1", "example_tag_2"]
tags = db.session.query(Tag).filter(Tag.name.in_(tag_names))
for tag in tags:
@@ -480,7 +483,7 @@ def test_post_tag(self):
user_id = self.get_user(username="admin").get_id()
tag = (
db.session.query(Tag)
- .filter(Tag.name == "my_tag", Tag.type == TagTypes.custom)
+ .filter(Tag.name == "my_tag", Tag.type == TagType.custom)
.one_or_none()
)
assert tag is not None
@@ -576,13 +579,13 @@ def test_post_bulk_tag(self):
tagged_objects = db.session.query(TaggedObject).filter(
TaggedObject.object_id == dashboard.id,
- TaggedObject.object_type == ObjectTypes.dashboard,
+ TaggedObject.object_type == ObjectType.dashboard,
)
assert tagged_objects.count() == 2
tagged_objects = db.session.query(TaggedObject).filter(
TaggedObject.object_id == chart.id,
- TaggedObject.object_type == ObjectTypes.chart,
+ TaggedObject.object_type == ObjectType.chart,
)
assert tagged_objects.count() == 2
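
Note: wrapping the insert-then-GET sequence in freeze_time pins the clock, so time-derived fields in the API response (created/changed timestamps and their humanized forms) match what was recorded at insert time. A minimal sketch of the idea:

from datetime import datetime

from freezegun import freeze_time

with freeze_time(datetime.now()):
    created_at = datetime.now()   # time recorded at insert
    fetched_at = datetime.now()   # time observed when the API responds
assert created_at == fetched_at   # no drift inside the frozen block
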
diff --git a/tests/integration_tests/tags/commands_tests.py b/tests/integration_tests/tags/commands_tests.py
index cd5a024840b1c..48abfd31b4128 100644
--- a/tests/integration_tests/tags/commands_tests.py
+++ b/tests/integration_tests/tags/commands_tests.py
@@ -22,22 +22,22 @@
from werkzeug.utils import secure_filename
from superset import db, security_manager
-from superset.commands.exceptions import CommandInvalidError
-from superset.commands.importers.exceptions import IncorrectVersionError
-from superset.connectors.sqla.models import SqlaTable
-from superset.dashboards.commands.exceptions import DashboardNotFoundError
-from superset.dashboards.commands.export import (
+from superset.commands.dashboard.exceptions import DashboardNotFoundError
+from superset.commands.dashboard.export import (
append_charts,
ExportDashboardsCommand,
get_default_position,
)
-from superset.dashboards.commands.importers import v0, v1
+from superset.commands.dashboard.importers import v0, v1
+from superset.commands.exceptions import CommandInvalidError
+from superset.commands.importers.exceptions import IncorrectVersionError
+from superset.commands.tag.create import CreateCustomTagCommand
+from superset.commands.tag.delete import DeleteTaggedObjectCommand, DeleteTagsCommand
+from superset.connectors.sqla.models import SqlaTable
from superset.models.core import Database
from superset.models.dashboard import Dashboard
from superset.models.slice import Slice
-from superset.tags.commands.create import CreateCustomTagCommand
-from superset.tags.commands.delete import DeleteTaggedObjectCommand, DeleteTagsCommand
-from superset.tags.models import ObjectTypes, Tag, TaggedObject, TagTypes
+from superset.tags.models import ObjectType, Tag, TaggedObject, TagType
from tests.integration_tests.base_tests import SupersetTestCase
from tests.integration_tests.fixtures.importexport import (
chart_config,
@@ -65,7 +65,7 @@ def test_create_custom_tag_command(self):
)
example_tags = ["create custom tag example 1", "create custom tag example 2"]
command = CreateCustomTagCommand(
- ObjectTypes.dashboard.value, example_dashboard.id, example_tags
+ ObjectType.dashboard.value, example_dashboard.id, example_tags
)
command.run()
@@ -74,7 +74,7 @@ def test_create_custom_tag_command(self):
.join(TaggedObject)
.filter(
TaggedObject.object_id == example_dashboard.id,
- Tag.type == TagTypes.custom,
+ Tag.type == TagType.custom,
)
.all()
)
@@ -101,7 +101,7 @@ def test_delete_tags_command(self):
)
example_tags = ["create custom tag example 1", "create custom tag example 2"]
command = CreateCustomTagCommand(
- ObjectTypes.dashboard.value, example_dashboard.id, example_tags
+ ObjectType.dashboard.value, example_dashboard.id, example_tags
)
command.run()
@@ -110,7 +110,7 @@ def test_delete_tags_command(self):
.join(TaggedObject)
.filter(
TaggedObject.object_id == example_dashboard.id,
- Tag.type == TagTypes.custom,
+ Tag.type == TagType.custom,
)
.all()
)
@@ -133,7 +133,7 @@ def test_delete_tags_command(self):
)
example_tags = ["create custom tag example 1", "create custom tag example 2"]
command = CreateCustomTagCommand(
- ObjectTypes.dashboard.value, example_dashboard.id, example_tags
+ ObjectType.dashboard.value, example_dashboard.id, example_tags
)
command.run()
@@ -142,14 +142,14 @@ def test_delete_tags_command(self):
.join(Tag)
.filter(
TaggedObject.object_id == example_dashboard.id,
- TaggedObject.object_type == ObjectTypes.dashboard.name,
+ TaggedObject.object_type == ObjectType.dashboard.name,
Tag.name.in_(example_tags),
)
)
assert tagged_objects.count() == 2
# delete one of the tagged objects
command = DeleteTaggedObjectCommand(
- object_type=ObjectTypes.dashboard.value,
+ object_type=ObjectType.dashboard.value,
object_id=example_dashboard.id,
tag=example_tags[0],
)
@@ -159,7 +159,7 @@ def test_delete_tags_command(self):
.join(Tag)
.filter(
TaggedObject.object_id == example_dashboard.id,
- TaggedObject.object_type == ObjectTypes.dashboard.name,
+ TaggedObject.object_type == ObjectType.dashboard.name,
Tag.name.in_(example_tags),
)
)
diff --git a/tests/integration_tests/tags/dao_tests.py b/tests/integration_tests/tags/dao_tests.py
index 8acaa353e9d31..38bb4d0904d74 100644
--- a/tests/integration_tests/tags/dao_tests.py
+++ b/tests/integration_tests/tags/dao_tests.py
@@ -23,7 +23,7 @@
from superset.models.sql_lab import SavedQuery
from superset.daos.tag import TagDAO
from superset.tags.exceptions import InvalidTagNameError
-from superset.tags.models import ObjectTypes, Tag, TaggedObject
+from superset.tags.models import ObjectType, Tag, TaggedObject
from tests.integration_tests.tags.api_tests import TAGS_FIXTURE_COUNT
import tests.integration_tests.test_app # pylint: disable=unused-import
@@ -57,7 +57,7 @@ def insert_tagged_object(
self,
tag_id: int,
object_id: int,
- object_type: ObjectTypes,
+ object_type: ObjectType,
) -> TaggedObject:
tag = db.session.query(Tag).filter(Tag.id == tag_id).first()
tagged_object = TaggedObject(
@@ -113,7 +113,7 @@ def create_tagged_objects(self):
tagged_objects.append(
self.insert_tagged_object(
object_id=dashboard_id,
- object_type=ObjectTypes.dashboard,
+ object_type=ObjectType.dashboard,
tag_id=tag.id,
)
)
@@ -124,17 +124,23 @@ def create_tagged_objects(self):
@pytest.mark.usefixtures("with_tagging_system_feature")
# test create tag
def test_create_tagged_objects(self):
- # test that a tag cannot be added if it has ':' in it
- with pytest.raises(DAOCreateFailedError):
- TagDAO.create_custom_tagged_objects(
- object_type=ObjectTypes.dashboard.name,
- object_id=1,
- tag_names=["invalid:example tag 1"],
- )
+ # test that a tag can be added if it has ':' in it
+ TagDAO.create_custom_tagged_objects(
+ object_type=ObjectType.dashboard.name,
+ object_id=1,
+ tag_names=["valid:example tag 1"],
+ )
+
+ # test that a tag can be added if it has ',' in it
+ TagDAO.create_custom_tagged_objects(
+ object_type=ObjectType.dashboard.name,
+ object_id=1,
+ tag_names=["example,tag,1"],
+ )
# test that a tag can be added if it has a valid name
TagDAO.create_custom_tagged_objects(
- object_type=ObjectTypes.dashboard.name,
+ object_type=ObjectType.dashboard.name,
object_id=1,
tag_names=["example tag 1"],
)
@@ -155,7 +161,7 @@ def test_get_objects_from_tag(self):
dashboard_id = dashboard.id
tag = db.session.query(Tag).filter_by(name="example_tag_1").one()
self.insert_tagged_object(
- object_id=dashboard_id, object_type=ObjectTypes.dashboard, tag_id=tag.id
+ object_id=dashboard_id, object_type=ObjectType.dashboard, tag_id=tag.id
)
# get objects
tagged_objects = TagDAO.get_tagged_objects_for_tags(
@@ -179,7 +185,7 @@ def test_get_objects_from_tag(self):
TaggedObject,
and_(
TaggedObject.object_id == Slice.id,
- TaggedObject.object_type == ObjectTypes.chart,
+ TaggedObject.object_type == ObjectType.chart,
),
)
.distinct(Slice.id)
@@ -191,7 +197,7 @@ def test_get_objects_from_tag(self):
TaggedObject,
and_(
TaggedObject.object_id == Dashboard.id,
- TaggedObject.object_type == ObjectTypes.dashboard,
+ TaggedObject.object_type == ObjectType.dashboard,
),
)
.distinct(Dashboard.id)
@@ -207,13 +213,46 @@ def test_get_objects_from_tag(self):
tagged_objects = TagDAO.get_tagged_objects_for_tags(obj_types=["chart"])
assert len(tagged_objects) == num_charts
+ @pytest.mark.usefixtures("load_world_bank_dashboard_with_slices")
+ @pytest.mark.usefixtures("with_tagging_system_feature")
+ @pytest.mark.usefixtures("create_tags")
+ # test get objects from tag
+ def test_get_objects_from_tag_with_id(self):
+ # create tagged objects
+ dashboard = (
+ db.session.query(Dashboard)
+ .filter(Dashboard.dashboard_title == "World Bank's Data")
+ .first()
+ )
+ dashboard_id = dashboard.id
+ tag_1 = db.session.query(Tag).filter_by(name="example_tag_1").one()
+ tag_2 = db.session.query(Tag).filter_by(name="example_tag_2").one()
+ tag_ids = [tag_1.id, tag_2.id]
+ self.insert_tagged_object(
+ object_id=dashboard_id, object_type=ObjectType.dashboard, tag_id=tag_1.id
+ )
+ # get objects
+ tagged_objects = TagDAO.get_tagged_objects_by_tag_id(tag_ids)
+ assert len(tagged_objects) == 1
+
+ # test get objects from tag with type
+ tagged_objects = TagDAO.get_tagged_objects_by_tag_id(
+ tag_ids, obj_types=["dashboard", "chart"]
+ )
+ assert len(tagged_objects) == 1
+
+ tagged_objects = TagDAO.get_tagged_objects_by_tag_id(
+ tag_ids, obj_types=["chart"]
+ )
+ assert len(tagged_objects) == 0
+
@pytest.mark.usefixtures("load_world_bank_dashboard_with_slices")
@pytest.mark.usefixtures("with_tagging_system_feature")
@pytest.mark.usefixtures("create_tagged_objects")
def test_find_tagged_object(self):
tag = db.session.query(Tag).filter(Tag.name == "example_tag_1").first()
tagged_object = TagDAO.find_tagged_object(
- object_id=1, object_type=ObjectTypes.dashboard.name, tag_id=tag.id
+ object_id=1, object_type=ObjectType.dashboard.name, tag_id=tag.id
)
assert tagged_object is not None
@@ -269,29 +308,21 @@ def test_delete_tagged_object(self):
.filter(
TaggedObject.tag_id == tag.id,
TaggedObject.object_id == 1,
- TaggedObject.object_type == ObjectTypes.dashboard.name,
+ TaggedObject.object_type == ObjectType.dashboard.name,
)
.first()
)
assert tagged_object is not None
TagDAO.delete_tagged_object(
- object_type=ObjectTypes.dashboard.name, object_id=1, tag_name=tag.name
+ object_type=ObjectType.dashboard.name, object_id=1, tag_name=tag.name
)
tagged_object = (
db.session.query(TaggedObject)
.filter(
TaggedObject.tag_id == tag.id,
TaggedObject.object_id == 1,
- TaggedObject.object_type == ObjectTypes.dashboard.name,
+ TaggedObject.object_type == ObjectType.dashboard.name,
)
.first()
)
assert tagged_object is None
-
- @pytest.mark.usefixtures("load_world_bank_dashboard_with_slices")
- @pytest.mark.usefixtures("with_tagging_system_feature")
- def test_validate_tag_name(self):
- assert TagDAO.validate_tag_name("example_tag_name") is True
- assert TagDAO.validate_tag_name("invalid:tag_name") is False
- db.session.query(TaggedObject).delete()
- db.session.query(Tag).delete()
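
Note: the rewritten DAO test inverts the old rule: tag names containing ":" or "," are now accepted instead of raising DAOCreateFailedError, and test_validate_tag_name is dropped along with the validation it exercised. A hypothetical stand-in validator showing the relaxed rule the tests now encode (the real check lives in TagDAO):

def is_valid_tag_name(name: str) -> bool:
    # only effectively-empty names are rejected; ":" and "," are allowed
    return bool(name.strip())

assert is_valid_tag_name("valid:example tag 1")
assert is_valid_tag_name("example,tag,1")
assert not is_valid_tag_name("   ")
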
diff --git a/tests/integration_tests/tasks/async_queries_tests.py b/tests/integration_tests/tasks/async_queries_tests.py
index 8e6e595757c4f..01880b7a627ac 100644
--- a/tests/integration_tests/tasks/async_queries_tests.py
+++ b/tests/integration_tests/tasks/async_queries_tests.py
@@ -21,8 +21,8 @@
import pytest
from celery.exceptions import SoftTimeLimitExceeded
-from superset.charts.commands.exceptions import ChartDataQueryFailedError
-from superset.charts.data.commands.get_data_command import ChartDataCommand
+from superset.commands.chart.data.get_data_command import ChartDataCommand
+from superset.commands.chart.exceptions import ChartDataQueryFailedError
from superset.exceptions import SupersetException
from superset.extensions import async_query_manager, security_manager
from tests.integration_tests.base_tests import SupersetTestCase
diff --git a/tests/integration_tests/utils_tests.py b/tests/integration_tests/utils_tests.py
index 6648d72c61788..ddd0b0caf43ef 100644
--- a/tests/integration_tests/utils_tests.py
+++ b/tests/integration_tests/utils_tests.py
@@ -24,7 +24,7 @@
from typing import Any, Optional
from unittest.mock import Mock, patch
-from superset.databases.commands.exceptions import DatabaseInvalidError
+from superset.commands.database.exceptions import DatabaseInvalidError
from tests.integration_tests.fixtures.birth_names_dashboard import (
load_birth_names_dashboard_with_slices,
load_birth_names_data,
@@ -59,7 +59,6 @@
get_stacktrace,
json_int_dttm_ser,
json_iso_dttm_ser,
- JSONEncodedDict,
merge_extra_filters,
merge_extra_form_data,
merge_request_params,
@@ -583,15 +582,6 @@ def test_format_timedelta(self):
"-16 days, 4:03:00",
)
- def test_json_encoded_obj(self):
- obj = {"a": 5, "b": ["a", "g", 5]}
- val = '{"a": 5, "b": ["a", "g", 5]}'
- jsonObj = JSONEncodedDict()
- resp = jsonObj.process_bind_param(obj, "dialect")
- self.assertIn('"a": 5', resp)
- self.assertIn('"b": ["a", "g", 5]', resp)
- self.assertEqual(jsonObj.process_result_value(val, "dialect"), obj)
-
def test_validate_json(self):
valid = '{"a": 5, "b": [1, 5, ["g", "h"]]}'
self.assertIsNone(validate_json(valid))
@@ -754,50 +744,6 @@ def test_as_list(self):
self.assertListEqual(as_list([123]), [123])
self.assertListEqual(as_list("foo"), ["foo"])
- @pytest.mark.usefixtures("load_world_bank_dashboard_with_slices")
- def test_build_extra_filters(self):
- world_health = db.session.query(Dashboard).filter_by(slug="world_health").one()
- layout = json.loads(world_health.position_json)
- filter_ = db.session.query(Slice).filter_by(slice_name="Region Filter").one()
- world = db.session.query(Slice).filter_by(slice_name="World's Population").one()
- box_plot = db.session.query(Slice).filter_by(slice_name="Box plot").one()
- treemap = db.session.query(Slice).filter_by(slice_name="Treemap").one()
-
- filter_scopes = {
- str(filter_.id): {
- "region": {"scope": ["ROOT_ID"], "immune": [treemap.id]},
- "country_name": {
- "scope": ["ROOT_ID"],
- "immune": [treemap.id, box_plot.id],
- },
- }
- }
-
- default_filters = {
- str(filter_.id): {
- "region": ["North America"],
- "country_name": ["United States"],
- }
- }
-
- # immune to all filters
- assert (
- build_extra_filters(layout, filter_scopes, default_filters, treemap.id)
- == []
- )
-
- # in scope
- assert build_extra_filters(
- layout, filter_scopes, default_filters, world.id
- ) == [
- {"col": "region", "op": "==", "val": "North America"},
- {"col": "country_name", "op": "in", "val": ["United States"]},
- ]
-
- assert build_extra_filters(
- layout, filter_scopes, default_filters, box_plot.id
- ) == [{"col": "region", "op": "==", "val": "North America"}]
-
def test_merge_extra_filters_with_no_extras(self):
form_data = {
"time_range": "Last 10 days",
diff --git a/tests/unit_tests/charts/commands/importers/v1/import_test.py b/tests/unit_tests/charts/commands/importers/v1/import_test.py
index 06e0063fe93ad..f0d142644df25 100644
--- a/tests/unit_tests/charts/commands/importers/v1/import_test.py
+++ b/tests/unit_tests/charts/commands/importers/v1/import_test.py
@@ -30,7 +30,7 @@ def test_import_chart(mocker: MockFixture, session: Session) -> None:
Test importing a chart.
"""
from superset import security_manager
- from superset.charts.commands.importers.v1.utils import import_chart
+ from superset.commands.chart.importers.v1.utils import import_chart
from superset.connectors.sqla.models import SqlaTable
from superset.models.core import Database
from superset.models.slice import Slice
@@ -57,7 +57,7 @@ def test_import_chart_managed_externally(mocker: MockFixture, session: Session)
Test importing a chart that is managed externally.
"""
from superset import security_manager
- from superset.charts.commands.importers.v1.utils import import_chart
+ from superset.commands.chart.importers.v1.utils import import_chart
from superset.connectors.sqla.models import SqlaTable
from superset.models.core import Database
from superset.models.slice import Slice
@@ -87,7 +87,7 @@ def test_import_chart_without_permission(
Test importing a chart when a user doesn't have permissions to create.
"""
from superset import security_manager
- from superset.charts.commands.importers.v1.utils import import_chart
+ from superset.commands.chart.importers.v1.utils import import_chart
from superset.connectors.sqla.models import SqlaTable
from superset.models.core import Database
from superset.models.slice import Slice
diff --git a/tests/unit_tests/charts/commands/importers/v1/utils_test.py b/tests/unit_tests/charts/commands/importers/v1/utils_test.py
index 77d31e7d776b3..de3f805d8bdcf 100644
--- a/tests/unit_tests/charts/commands/importers/v1/utils_test.py
+++ b/tests/unit_tests/charts/commands/importers/v1/utils_test.py
@@ -17,7 +17,7 @@
import json
-from superset.charts.commands.importers.v1.utils import migrate_chart
+from superset.commands.chart.importers.v1.utils import migrate_chart
def test_migrate_chart_area() -> None:
@@ -31,13 +31,21 @@ def test_migrate_chart_area() -> None:
"description": None,
"certified_by": None,
"certification_details": None,
- "viz_type": "area",
+ "viz_type": "echarts_area",
"query_context": None,
"params": json.dumps(
{
- "adhoc_filters": [],
+ "adhoc_filters": [
+ {
+ "clause": "WHERE",
+ "subject": "ds",
+ "operator": "TEMPORAL_RANGE",
+ "comparator": "No filter",
+ "expressionType": "SIMPLE",
+ }
+ ],
"annotation_layers": [],
- "bottom_margin": "auto",
+ "x_axis_title_margin": "auto",
"color_scheme": "supersetColors",
"comparison_type": "values",
"dashboards": [],
diff --git a/tests/unit_tests/common/test_get_aggregated_join_column.py b/tests/unit_tests/common/test_get_aggregated_join_column.py
index 8effacf2494cb..de0b6b92b2850 100644
--- a/tests/unit_tests/common/test_get_aggregated_join_column.py
+++ b/tests/unit_tests/common/test_get_aggregated_join_column.py
@@ -24,7 +24,7 @@
AGGREGATED_JOIN_COLUMN,
QueryContextProcessor,
)
-from superset.connectors.base.models import BaseDatasource
+from superset.connectors.sqla.models import BaseDatasource
from superset.constants import TimeGrain
query_context_processor = QueryContextProcessor(
diff --git a/tests/unit_tests/dao/tag_test.py b/tests/unit_tests/dao/tag_test.py
index 065ed756628cc..5f29d0f28c8ac 100644
--- a/tests/unit_tests/dao/tag_test.py
+++ b/tests/unit_tests/dao/tag_test.py
@@ -149,7 +149,7 @@ def test_user_favorite_tag_exc_raise(mocker):
def test_create_tag_relationship(mocker):
from superset.daos.tag import TagDAO
from superset.tags.models import ( # Assuming these are defined in the same module
- ObjectTypes,
+ ObjectType,
TaggedObject,
)
@@ -157,9 +157,9 @@ def test_create_tag_relationship(mocker):
# Define a list of objects to tag
objects_to_tag = [
- (ObjectTypes.query, 1),
- (ObjectTypes.chart, 2),
- (ObjectTypes.dashboard, 3),
+ (ObjectType.query, 1),
+ (ObjectType.chart, 2),
+ (ObjectType.dashboard, 3),
]
# Call the function
diff --git a/tests/unit_tests/dashboards/commands/importers/v1/import_test.py b/tests/unit_tests/dashboards/commands/importers/v1/import_test.py
index e07a23f6bf848..67e089775598f 100644
--- a/tests/unit_tests/dashboards/commands/importers/v1/import_test.py
+++ b/tests/unit_tests/dashboards/commands/importers/v1/import_test.py
@@ -30,8 +30,8 @@ def test_import_dashboard(mocker: MockFixture, session: Session) -> None:
Test importing a dashboard.
"""
from superset import security_manager
+ from superset.commands.dashboard.importers.v1.utils import import_dashboard
from superset.connectors.sqla.models import SqlaTable
- from superset.dashboards.commands.importers.v1.utils import import_dashboard
from superset.models.core import Database
from superset.models.slice import Slice
from tests.integration_tests.fixtures.importexport import dashboard_config
@@ -58,8 +58,8 @@ def test_import_dashboard_managed_externally(
Test importing a dashboard that is managed externally.
"""
from superset import security_manager
+ from superset.commands.dashboard.importers.v1.utils import import_dashboard
from superset.connectors.sqla.models import SqlaTable
- from superset.dashboards.commands.importers.v1.utils import import_dashboard
from superset.models.core import Database
from superset.models.slice import Slice
from tests.integration_tests.fixtures.importexport import dashboard_config
@@ -86,8 +86,8 @@ def test_import_dashboard_without_permission(
Test importing a dashboard when a user doesn't have permissions to create.
"""
from superset import security_manager
+ from superset.commands.dashboard.importers.v1.utils import import_dashboard
from superset.connectors.sqla.models import SqlaTable
- from superset.dashboards.commands.importers.v1.utils import import_dashboard
from superset.models.core import Database
from superset.models.slice import Slice
from tests.integration_tests.fixtures.importexport import dashboard_config
diff --git a/tests/unit_tests/dashboards/commands/importers/v1/utils_test.py b/tests/unit_tests/dashboards/commands/importers/v1/utils_test.py
index 60a659159a332..0e8436295778b 100644
--- a/tests/unit_tests/dashboards/commands/importers/v1/utils_test.py
+++ b/tests/unit_tests/dashboards/commands/importers/v1/utils_test.py
@@ -29,7 +29,7 @@ def test_update_id_refs_immune_missing( # pylint: disable=invalid-name
immune to filters. The missing chart ID should be simply ignored when the
dashboard is imported.
"""
- from superset.dashboards.commands.importers.v1.utils import update_id_refs
+ from superset.commands.dashboard.importers.v1.utils import update_id_refs
config = {
"position": {
@@ -83,7 +83,7 @@ def test_update_id_refs_immune_missing( # pylint: disable=invalid-name
def test_update_native_filter_config_scope_excluded():
- from superset.dashboards.commands.importers.v1.utils import update_id_refs
+ from superset.commands.dashboard.importers.v1.utils import update_id_refs
config = {
"position": {
diff --git a/tests/unit_tests/databases/api_test.py b/tests/unit_tests/databases/api_test.py
index aa15645ddb995..28ca123ec66a0 100644
--- a/tests/unit_tests/databases/api_test.py
+++ b/tests/unit_tests/databases/api_test.py
@@ -396,7 +396,7 @@ def test_delete_ssh_tunnel(
mocker.patch("sqlalchemy.engine.URL.get_driver_name", return_value="gsheets")
mocker.patch("superset.utils.log.DBEventLogger.log")
mocker.patch(
- "superset.databases.ssh_tunnel.commands.delete.is_feature_enabled",
+ "superset.commands.database.ssh_tunnel.delete.is_feature_enabled",
return_value=True,
)
@@ -472,7 +472,7 @@ def test_delete_ssh_tunnel_not_found(
mocker.patch("sqlalchemy.engine.URL.get_driver_name", return_value="gsheets")
mocker.patch("superset.utils.log.DBEventLogger.log")
mocker.patch(
- "superset.databases.ssh_tunnel.commands.delete.is_feature_enabled",
+ "superset.commands.database.ssh_tunnel.delete.is_feature_enabled",
return_value=True,
)
@@ -559,7 +559,7 @@ def test_apply_dynamic_database_filter(
mocker.patch("sqlalchemy.engine.URL.get_driver_name", return_value="gsheets")
mocker.patch("superset.utils.log.DBEventLogger.log")
mocker.patch(
- "superset.databases.ssh_tunnel.commands.delete.is_feature_enabled",
+ "superset.commands.database.ssh_tunnel.delete.is_feature_enabled",
return_value=False,
)
diff --git a/tests/unit_tests/databases/commands/importers/v1/import_test.py b/tests/unit_tests/databases/commands/importers/v1/import_test.py
index b8bd24d94d187..5fb4d12ce5c22 100644
--- a/tests/unit_tests/databases/commands/importers/v1/import_test.py
+++ b/tests/unit_tests/databases/commands/importers/v1/import_test.py
@@ -17,6 +17,7 @@
# pylint: disable=unused-argument, import-outside-toplevel, invalid-name
import copy
+import json
import pytest
from pytest_mock import MockFixture
@@ -30,7 +31,7 @@ def test_import_database(mocker: MockFixture, session: Session) -> None:
Test importing a database.
"""
from superset import security_manager
- from superset.databases.commands.importers.v1.utils import import_database
+ from superset.commands.database.importers.v1.utils import import_database
from superset.models.core import Database
from tests.integration_tests.fixtures.importexport import database_config
@@ -70,7 +71,7 @@ def test_import_database_sqlite_invalid(mocker: MockFixture, session: Session) -
Test importing a database.
"""
from superset import app, security_manager
- from superset.databases.commands.importers.v1.utils import import_database
+ from superset.commands.database.importers.v1.utils import import_database
from superset.models.core import Database
from tests.integration_tests.fixtures.importexport import database_config_sqlite
@@ -99,7 +100,7 @@ def test_import_database_managed_externally(
Test importing a database that is managed externally.
"""
from superset import security_manager
- from superset.databases.commands.importers.v1.utils import import_database
+ from superset.commands.database.importers.v1.utils import import_database
from superset.models.core import Database
from tests.integration_tests.fixtures.importexport import database_config
@@ -125,7 +126,7 @@ def test_import_database_without_permission(
Test importing a database when a user doesn't have permissions to create.
"""
from superset import security_manager
- from superset.databases.commands.importers.v1.utils import import_database
+ from superset.commands.database.importers.v1.utils import import_database
from superset.models.core import Database
from tests.integration_tests.fixtures.importexport import database_config
@@ -142,3 +143,23 @@ def test_import_database_without_permission(
str(excinfo.value)
== "Database doesn't exist and user doesn't have permission to create databases"
)
+
+
+def test_import_database_with_version(mocker: MockFixture, session: Session) -> None:
+ """
+ Test importing a database with a version set.
+ """
+ from superset import security_manager
+ from superset.commands.database.importers.v1.utils import import_database
+ from superset.models.core import Database
+ from tests.integration_tests.fixtures.importexport import database_config
+
+ mocker.patch.object(security_manager, "can_access", return_value=True)
+
+ engine = session.get_bind()
+ Database.metadata.create_all(engine) # pylint: disable=no-member
+
+ config = copy.deepcopy(database_config)
+ config["extra"]["version"] = "1.1.1"
+ database = import_database(session, config)
+ assert json.loads(database.extra)["version"] == "1.1.1"
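
Note: the new test pins down that a "version" set under the import config's extra survives the import, since Database.extra is stored as a JSON-encoded string. The round-trip being asserted, in isolation:

import json

extra = {"version": "1.1.1"}
stored = json.dumps(extra)  # the shape in which import_database persists it
assert json.loads(stored)["version"] == "1.1.1"
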
diff --git a/tests/unit_tests/databases/commands/test_connection_test.py b/tests/unit_tests/databases/commands/test_connection_test.py
index 8e86cfd1cfe9b..66efa7d717854 100644
--- a/tests/unit_tests/databases/commands/test_connection_test.py
+++ b/tests/unit_tests/databases/commands/test_connection_test.py
@@ -17,7 +17,7 @@
from parameterized import parameterized
-from superset.databases.commands.test_connection import get_log_connection_action
+from superset.commands.database.test_connection import get_log_connection_action
from superset.databases.ssh_tunnel.models import SSHTunnel
diff --git a/tests/unit_tests/databases/ssh_tunnel/commands/create_test.py b/tests/unit_tests/databases/ssh_tunnel/commands/create_test.py
index fbad104c1da00..bd891b64f05ec 100644
--- a/tests/unit_tests/databases/ssh_tunnel/commands/create_test.py
+++ b/tests/unit_tests/databases/ssh_tunnel/commands/create_test.py
@@ -19,11 +19,11 @@
import pytest
from sqlalchemy.orm.session import Session
-from superset.databases.ssh_tunnel.commands.exceptions import SSHTunnelInvalidError
+from superset.commands.database.ssh_tunnel.exceptions import SSHTunnelInvalidError
def test_create_ssh_tunnel_command() -> None:
- from superset.databases.ssh_tunnel.commands.create import CreateSSHTunnelCommand
+ from superset.commands.database.ssh_tunnel.create import CreateSSHTunnelCommand
from superset.databases.ssh_tunnel.models import SSHTunnel
from superset.models.core import Database
@@ -44,7 +44,7 @@ def test_create_ssh_tunnel_command() -> None:
def test_create_ssh_tunnel_command_invalid_params() -> None:
- from superset.databases.ssh_tunnel.commands.create import CreateSSHTunnelCommand
+ from superset.commands.database.ssh_tunnel.create import CreateSSHTunnelCommand
from superset.databases.ssh_tunnel.models import SSHTunnel
from superset.models.core import Database
diff --git a/tests/unit_tests/databases/ssh_tunnel/commands/delete_test.py b/tests/unit_tests/databases/ssh_tunnel/commands/delete_test.py
index 641e34d3477a9..14838ddc58272 100644
--- a/tests/unit_tests/databases/ssh_tunnel/commands/delete_test.py
+++ b/tests/unit_tests/databases/ssh_tunnel/commands/delete_test.py
@@ -54,8 +54,8 @@ def session_with_data(session: Session) -> Iterator[Session]:
def test_delete_ssh_tunnel_command(
mocker: MockFixture, session_with_data: Session
) -> None:
+ from superset.commands.database.ssh_tunnel.delete import DeleteSSHTunnelCommand
from superset.daos.database import DatabaseDAO
- from superset.databases.ssh_tunnel.commands.delete import DeleteSSHTunnelCommand
from superset.databases.ssh_tunnel.models import SSHTunnel
result = DatabaseDAO.get_ssh_tunnel(1)
@@ -64,7 +64,7 @@ def test_delete_ssh_tunnel_command(
assert isinstance(result, SSHTunnel)
assert 1 == result.database_id
mocker.patch(
- "superset.databases.ssh_tunnel.commands.delete.is_feature_enabled",
+ "superset.commands.database.ssh_tunnel.delete.is_feature_enabled",
return_value=True,
)
DeleteSSHTunnelCommand(1).run()
diff --git a/tests/unit_tests/databases/ssh_tunnel/commands/update_test.py b/tests/unit_tests/databases/ssh_tunnel/commands/update_test.py
index d4a5faba8b145..5c3907b01635f 100644
--- a/tests/unit_tests/databases/ssh_tunnel/commands/update_test.py
+++ b/tests/unit_tests/databases/ssh_tunnel/commands/update_test.py
@@ -20,7 +20,7 @@
import pytest
from sqlalchemy.orm.session import Session
-from superset.databases.ssh_tunnel.commands.exceptions import SSHTunnelInvalidError
+from superset.commands.database.ssh_tunnel.exceptions import SSHTunnelInvalidError
@pytest.fixture
@@ -50,8 +50,8 @@ def session_with_data(session: Session) -> Iterator[Session]:
def test_update_shh_tunnel_command(session_with_data: Session) -> None:
+ from superset.commands.database.ssh_tunnel.update import UpdateSSHTunnelCommand
from superset.daos.database import DatabaseDAO
- from superset.databases.ssh_tunnel.commands.update import UpdateSSHTunnelCommand
from superset.databases.ssh_tunnel.models import SSHTunnel
result = DatabaseDAO.get_ssh_tunnel(1)
@@ -72,8 +72,8 @@ def test_update_shh_tunnel_command(session_with_data: Session) -> None:
def test_update_shh_tunnel_invalid_params(session_with_data: Session) -> None:
+ from superset.commands.database.ssh_tunnel.update import UpdateSSHTunnelCommand
from superset.daos.database import DatabaseDAO
- from superset.databases.ssh_tunnel.commands.update import UpdateSSHTunnelCommand
from superset.databases.ssh_tunnel.models import SSHTunnel
result = DatabaseDAO.get_ssh_tunnel(1)
diff --git a/tests/unit_tests/datasets/commands/export_test.py b/tests/unit_tests/datasets/commands/export_test.py
index be6a637f8fb38..20565da5bc5ae 100644
--- a/tests/unit_tests/datasets/commands/export_test.py
+++ b/tests/unit_tests/datasets/commands/export_test.py
@@ -25,8 +25,8 @@ def test_export(session: Session) -> None:
"""
Test exporting a dataset.
"""
+ from superset.commands.dataset.export import ExportDatasetsCommand
from superset.connectors.sqla.models import SqlaTable, SqlMetric, TableColumn
- from superset.datasets.commands.export import ExportDatasetsCommand
from superset.models.core import Database
engine = session.get_bind()
diff --git a/tests/unit_tests/datasets/commands/importers/v1/import_test.py b/tests/unit_tests/datasets/commands/importers/v1/import_test.py
index e8e8c8e7c59fc..5089838e693c1 100644
--- a/tests/unit_tests/datasets/commands/importers/v1/import_test.py
+++ b/tests/unit_tests/datasets/commands/importers/v1/import_test.py
@@ -28,11 +28,11 @@
from pytest_mock import MockFixture
from sqlalchemy.orm.session import Session
-from superset.datasets.commands.exceptions import (
+from superset.commands.dataset.exceptions import (
DatasetForbiddenDataURI,
ImportFailedError,
)
-from superset.datasets.commands.importers.v1.utils import validate_data_uri
+from superset.commands.dataset.importers.v1.utils import validate_data_uri
def test_import_dataset(mocker: MockFixture, session: Session) -> None:
@@ -40,8 +40,8 @@ def test_import_dataset(mocker: MockFixture, session: Session) -> None:
Test importing a dataset.
"""
from superset import security_manager
+ from superset.commands.dataset.importers.v1.utils import import_dataset
from superset.connectors.sqla.models import SqlaTable
- from superset.datasets.commands.importers.v1.utils import import_dataset
from superset.models.core import Database
mocker.patch.object(security_manager, "can_access", return_value=True)
@@ -156,8 +156,8 @@ def test_import_dataset_duplicate_column(mocker: MockFixture, session: Session)
"""
from superset import security_manager
from superset.columns.models import Column as NewColumn
+ from superset.commands.dataset.importers.v1.utils import import_dataset
from superset.connectors.sqla.models import SqlaTable, TableColumn
- from superset.datasets.commands.importers.v1.utils import import_dataset
from superset.models.core import Database
mocker.patch.object(security_manager, "can_access", return_value=True)
@@ -281,8 +281,8 @@ def test_import_column_extra_is_string(mocker: MockFixture, session: Session) ->
Test importing a dataset when the column extra is a string.
"""
from superset import security_manager
+ from superset.commands.dataset.importers.v1.utils import import_dataset
from superset.connectors.sqla.models import SqlaTable, SqlMetric, TableColumn
- from superset.datasets.commands.importers.v1.utils import import_dataset
from superset.datasets.schemas import ImportV1DatasetSchema
from superset.models.core import Database
@@ -366,8 +366,8 @@ def test_import_dataset_extra_empty_string(
Test importing a dataset when the extra field is an empty string.
"""
from superset import security_manager
+ from superset.commands.dataset.importers.v1.utils import import_dataset
from superset.connectors.sqla.models import SqlaTable
- from superset.datasets.commands.importers.v1.utils import import_dataset
from superset.datasets.schemas import ImportV1DatasetSchema
from superset.models.core import Database
@@ -422,7 +422,7 @@ def test_import_dataset_extra_empty_string(
assert sqla_table.extra == None
-@patch("superset.datasets.commands.importers.v1.utils.request")
+@patch("superset.commands.dataset.importers.v1.utils.request")
def test_import_column_allowed_data_url(
request: Mock,
mocker: MockFixture,
@@ -434,8 +434,8 @@ def test_import_column_allowed_data_url(
import io
from superset import security_manager
+ from superset.commands.dataset.importers.v1.utils import import_dataset
from superset.connectors.sqla.models import SqlaTable
- from superset.datasets.commands.importers.v1.utils import import_dataset
from superset.datasets.schemas import ImportV1DatasetSchema
from superset.models.core import Database
@@ -510,8 +510,8 @@ def test_import_dataset_managed_externally(
Test importing a dataset that is managed externally.
"""
from superset import security_manager
+ from superset.commands.dataset.importers.v1.utils import import_dataset
from superset.connectors.sqla.models import SqlaTable
- from superset.datasets.commands.importers.v1.utils import import_dataset
from superset.models.core import Database
from tests.integration_tests.fixtures.importexport import dataset_config
diff --git a/tests/unit_tests/db_engine_specs/test_doris.py b/tests/unit_tests/db_engine_specs/test_doris.py
new file mode 100644
index 0000000000000..d7444f8d2d62e
--- /dev/null
+++ b/tests/unit_tests/db_engine_specs/test_doris.py
@@ -0,0 +1,147 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+from typing import Any, Optional
+
+import pytest
+from sqlalchemy import JSON, types
+from sqlalchemy.engine.url import make_url
+
+from superset.db_engine_specs.doris import (
+ AggState,
+ ARRAY,
+ BITMAP,
+ DOUBLE,
+ HLL,
+ LARGEINT,
+ MAP,
+ QuantileState,
+ STRUCT,
+ TINYINT,
+)
+from superset.utils.core import GenericDataType
+from tests.unit_tests.db_engine_specs.utils import assert_column_spec
+
+
+@pytest.mark.parametrize(
+ "native_type,sqla_type,attrs,generic_type,is_dttm",
+ [
+ # Numeric
+ ("tinyint", TINYINT, None, GenericDataType.NUMERIC, False),
+ ("largeint", LARGEINT, None, GenericDataType.NUMERIC, False),
+ ("decimal(38,18)", types.DECIMAL, None, GenericDataType.NUMERIC, False),
+ ("decimalv3(38,18)", types.DECIMAL, None, GenericDataType.NUMERIC, False),
+ ("double", DOUBLE, None, GenericDataType.NUMERIC, False),
+ # String
+ ("char(10)", types.CHAR, None, GenericDataType.STRING, False),
+ ("varchar(65533)", types.VARCHAR, None, GenericDataType.STRING, False),
+ ("binary", types.BINARY, None, GenericDataType.STRING, False),
+ ("text", types.TEXT, None, GenericDataType.STRING, False),
+ ("string", types.String, None, GenericDataType.STRING, False),
+ # Date
+ ("datetimev2", types.DateTime, None, GenericDataType.STRING, False),
+ ("datev2", types.Date, None, GenericDataType.STRING, False),
+ # Complex type
+ ("array
", ARRAY, None, GenericDataType.STRING, False),
+ ("map", MAP, None, GenericDataType.STRING, False),
+ ("struct", STRUCT, None, GenericDataType.STRING, False),
+ ("json", JSON, None, GenericDataType.STRING, False),
+ ("jsonb", JSON, None, GenericDataType.STRING, False),
+ ("bitmap", BITMAP, None, GenericDataType.STRING, False),
+ ("hll", HLL, None, GenericDataType.STRING, False),
+ ("quantile_state", QuantileState, None, GenericDataType.STRING, False),
+ ("agg_state", AggState, None, GenericDataType.STRING, False),
+ ],
+)
+def test_get_column_spec(
+ native_type: str,
+ sqla_type: type[types.TypeEngine],
+ attrs: Optional[dict[str, Any]],
+ generic_type: GenericDataType,
+ is_dttm: bool,
+) -> None:
+ from superset.db_engine_specs.doris import DorisEngineSpec as spec
+
+ assert_column_spec(spec, native_type, sqla_type, attrs, generic_type, is_dttm)
+
+
+@pytest.mark.parametrize(
+ "sqlalchemy_uri,connect_args,return_schema,return_connect_args",
+ [
+ (
+ "doris://user:password@host/db1",
+ {"param1": "some_value"},
+ "db1",
+ {"param1": "some_value"},
+ ),
+ (
+ "pydoris://user:password@host/db1",
+ {"param1": "some_value"},
+ "db1",
+ {"param1": "some_value"},
+ ),
+ (
+ "doris://user:password@host/catalog1.db1",
+ {"param1": "some_value"},
+ "catalog1.db1",
+ {"param1": "some_value"},
+ ),
+ (
+ "pydoris://user:password@host/catalog1.db1",
+ {"param1": "some_value"},
+ "catalog1.db1",
+ {"param1": "some_value"},
+ ),
+ ],
+)
+def test_adjust_engine_params(
+ sqlalchemy_uri: str,
+ connect_args: dict[str, Any],
+ return_schema: str,
+ return_connect_args: dict[str, Any],
+) -> None:
+ from superset.db_engine_specs.doris import DorisEngineSpec
+
+ url = make_url(sqlalchemy_uri)
+ returned_url, returned_connect_args = DorisEngineSpec.adjust_engine_params(
+ url, connect_args
+ )
+ assert returned_url.database == return_schema
+ assert returned_connect_args == return_connect_args
+
+
+def test_get_schema_from_engine_params() -> None:
+ """
+ Test the ``get_schema_from_engine_params`` method.
+ """
+ from superset.db_engine_specs.doris import DorisEngineSpec
+
+ assert (
+ DorisEngineSpec.get_schema_from_engine_params(
+ make_url("doris://localhost:9030/hive.test"),
+ {},
+ )
+ == "test"
+ )
+
+ assert (
+ DorisEngineSpec.get_schema_from_engine_params(
+ make_url("doris://localhost:9030/hive"),
+ {},
+ )
+ is None
+ )
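
Note: the Doris tests encode that the database portion of the URL may be either "db" or "catalog.db"; adjust_engine_params keeps the full value, while get_schema_from_engine_params returns only the part after the dot, or None when no schema is given. A hypothetical helper mirroring that split (the real logic is in DorisEngineSpec):

from typing import Optional

def schema_from_database(database: str) -> Optional[str]:
    catalog, sep, schema = database.partition(".")
    return schema if sep else None

assert schema_from_database("hive.test") == "test"
assert schema_from_database("hive") is None
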
diff --git a/tests/unit_tests/db_engine_specs/test_dremio.py b/tests/unit_tests/db_engine_specs/test_dremio.py
index 6b1e8203b5dfe..eb77e7f10a292 100644
--- a/tests/unit_tests/db_engine_specs/test_dremio.py
+++ b/tests/unit_tests/db_engine_specs/test_dremio.py
@@ -18,6 +18,7 @@
from typing import Optional
import pytest
+from pytest_mock import MockerFixture
from tests.unit_tests.db_engine_specs.utils import assert_convert_dttm
from tests.unit_tests.fixtures.common import dttm
@@ -40,3 +41,18 @@ def test_convert_dttm(
from superset.db_engine_specs.dremio import DremioEngineSpec as spec
assert_convert_dttm(spec, target_type, expected_result, dttm)
+
+
+def test_get_allows_alias_in_select(mocker: MockerFixture) -> None:
+ from superset.db_engine_specs.dremio import DremioEngineSpec
+
+ database = mocker.MagicMock()
+
+ database.get_extra.return_value = {}
+ assert DremioEngineSpec.get_allows_alias_in_select(database) is True
+
+ database.get_extra.return_value = {"version": "24.1.0"}
+ assert DremioEngineSpec.get_allows_alias_in_select(database) is True
+
+ database.get_extra.return_value = {"version": "24.0.0"}
+ assert DremioEngineSpec.get_allows_alias_in_select(database) is False
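
Note: the new Dremio test gates alias support in SELECT on the server version reported in the database extras: an unset version and 24.1.0 allow it, 24.0.0 does not. One plausible check consistent with those three cases (hypothetical; the real rule lives in DremioEngineSpec.get_allows_alias_in_select):

from typing import Optional

def allows_alias_in_select(version: Optional[str]) -> bool:
    if not version:
        return True  # no version reported: stay permissive
    major, minor = (int(part) for part in version.split(".")[:2])
    return (major, minor) != (24, 0)

assert allows_alias_in_select(None)
assert allows_alias_in_select("24.1.0")
assert not allows_alias_in_select("24.0.0")
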
diff --git a/tests/unit_tests/db_engine_specs/test_redshift.py b/tests/unit_tests/db_engine_specs/test_redshift.py
new file mode 100644
index 0000000000000..ddd2c1a5eb2ea
--- /dev/null
+++ b/tests/unit_tests/db_engine_specs/test_redshift.py
@@ -0,0 +1,47 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+from datetime import datetime
+from typing import Optional
+
+import pytest
+
+from tests.unit_tests.db_engine_specs.utils import assert_convert_dttm
+from tests.unit_tests.fixtures.common import dttm
+
+
+@pytest.mark.parametrize(
+ "target_type,expected_result",
+ [
+ ("Date", "TO_DATE('2019-01-02', 'YYYY-MM-DD')"),
+ (
+ "DateTime",
+ "TO_TIMESTAMP('2019-01-02 03:04:05.678900', 'YYYY-MM-DD HH24:MI:SS.US')",
+ ),
+ (
+ "TimeStamp",
+ "TO_TIMESTAMP('2019-01-02 03:04:05.678900', 'YYYY-MM-DD HH24:MI:SS.US')",
+ ),
+ ("UnknownType", None),
+ ],
+)
+def test_convert_dttm(
+ target_type: str, expected_result: Optional[str], dttm: datetime
+) -> None:
+ from superset.db_engine_specs.redshift import RedshiftEngineSpec as spec
+
+ assert_convert_dttm(spec, target_type, expected_result, dttm)
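
The parametrized cases encode roughly the following dispatch. A string-keyed sketch for illustration only (the real `RedshiftEngineSpec.convert_dttm` dispatches on SQLAlchemy types, not raw strings):

```python
from datetime import datetime
from typing import Optional


def convert_dttm_sketch(target_type: str, dttm: datetime) -> Optional[str]:
    # Dates render via TO_DATE, datetimes/timestamps via TO_TIMESTAMP
    # with microsecond precision; unknown types yield no literal.
    if target_type.upper() == "DATE":
        return f"TO_DATE('{dttm.date().isoformat()}', 'YYYY-MM-DD')"
    if target_type.upper() in ("DATETIME", "TIMESTAMP"):
        ts = dttm.isoformat(sep=" ", timespec="microseconds")
        return f"TO_TIMESTAMP('{ts}', 'YYYY-MM-DD HH24:MI:SS.US')"
    return None
```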
diff --git a/tests/unit_tests/db_engine_specs/test_trino.py b/tests/unit_tests/db_engine_specs/test_trino.py
index 1b50a683a0841..15e55fc5af62f 100644
--- a/tests/unit_tests/db_engine_specs/test_trino.py
+++ b/tests/unit_tests/db_engine_specs/test_trino.py
@@ -15,6 +15,7 @@
# specific language governing permissions and limitations
# under the License.
# pylint: disable=unused-argument, import-outside-toplevel, protected-access
+import copy
import json
from datetime import datetime
from typing import Any, Optional
@@ -24,9 +25,11 @@
import pytest
from pytest_mock import MockerFixture
from sqlalchemy import types
+from trino.sqlalchemy import datatype
import superset.config
from superset.constants import QUERY_CANCEL_KEY, QUERY_EARLY_CANCEL_KEY, USER_AGENT
+from superset.superset_typing import ResultSetColumnType, SQLAColumnType
from superset.utils.core import GenericDataType
from tests.unit_tests.db_engine_specs.utils import (
assert_column_spec,
@@ -35,6 +38,24 @@
from tests.unit_tests.fixtures.common import dttm
+def _assert_columns_equal(actual_cols, expected_cols) -> None:
+ """
+ Assert equality of the given columns, bearing in mind that SQLAlchemy
+ type instances can't be compared for equality and must first be
+ converted to strings.
+ """
+ actual = copy.deepcopy(actual_cols)
+ expected = copy.deepcopy(expected_cols)
+
+ for col in actual:
+ col["type"] = str(col["type"])
+
+ for col in expected:
+ col["type"] = str(col["type"])
+
+ assert actual == expected
+
+
@pytest.mark.parametrize(
"extra,expected",
[
@@ -395,3 +416,104 @@ def _mock_execute(*args, **kwargs):
mock_query.set_extra_json_key.assert_called_once_with(
key=QUERY_CANCEL_KEY, value=query_id
)
+
+
+def test_get_columns(mocker: MockerFixture):
+ """Test that ROW columns are not expanded without expand_rows"""
+ from superset.db_engine_specs.trino import TrinoEngineSpec
+
+ field1_type = datatype.parse_sqltype("row(a varchar, b date)")
+ field2_type = datatype.parse_sqltype("row(r1 row(a varchar, b varchar))")
+ field3_type = datatype.parse_sqltype("int")
+
+ sqla_columns = [
+ SQLAColumnType(name="field1", type=field1_type, is_dttm=False),
+ SQLAColumnType(name="field2", type=field2_type, is_dttm=False),
+ SQLAColumnType(name="field3", type=field3_type, is_dttm=False),
+ ]
+ mock_inspector = mocker.MagicMock()
+ mock_inspector.get_columns.return_value = sqla_columns
+
+ actual = TrinoEngineSpec.get_columns(mock_inspector, "table", "schema")
+ expected = [
+ ResultSetColumnType(
+ name="field1", column_name="field1", type=field1_type, is_dttm=False
+ ),
+ ResultSetColumnType(
+ name="field2", column_name="field2", type=field2_type, is_dttm=False
+ ),
+ ResultSetColumnType(
+ name="field3", column_name="field3", type=field3_type, is_dttm=False
+ ),
+ ]
+
+ _assert_columns_equal(actual, expected)
+
+
+def test_get_columns_expand_rows(mocker: MockerFixture):
+ """Test that ROW columns are correctly expanded with expand_rows"""
+ from superset.db_engine_specs.trino import TrinoEngineSpec
+
+ field1_type = datatype.parse_sqltype("row(a varchar, b date)")
+ field2_type = datatype.parse_sqltype("row(r1 row(a varchar, b varchar))")
+ field3_type = datatype.parse_sqltype("int")
+
+ sqla_columns = [
+ SQLAColumnType(name="field1", type=field1_type, is_dttm=False),
+ SQLAColumnType(name="field2", type=field2_type, is_dttm=False),
+ SQLAColumnType(name="field3", type=field3_type, is_dttm=False),
+ ]
+ mock_inspector = mocker.MagicMock()
+ mock_inspector.get_columns.return_value = sqla_columns
+
+ actual = TrinoEngineSpec.get_columns(
+ mock_inspector, "table", "schema", {"expand_rows": True}
+ )
+ expected = [
+ ResultSetColumnType(
+ name="field1", column_name="field1", type=field1_type, is_dttm=False
+ ),
+ ResultSetColumnType(
+ name="field1.a",
+ column_name="field1.a",
+ type=types.VARCHAR(),
+ is_dttm=False,
+ query_as='"field1"."a" AS "field1.a"',
+ ),
+ ResultSetColumnType(
+ name="field1.b",
+ column_name="field1.b",
+ type=types.DATE(),
+ is_dttm=True,
+ query_as='"field1"."b" AS "field1.b"',
+ ),
+ ResultSetColumnType(
+ name="field2", column_name="field2", type=field2_type, is_dttm=False
+ ),
+ ResultSetColumnType(
+ name="field2.r1",
+ column_name="field2.r1",
+ type=datatype.parse_sqltype("row(a varchar, b varchar)"),
+ is_dttm=False,
+ query_as='"field2"."r1" AS "field2.r1"',
+ ),
+ ResultSetColumnType(
+ name="field2.r1.a",
+ column_name="field2.r1.a",
+ type=types.VARCHAR(),
+ is_dttm=False,
+ query_as='"field2"."r1"."a" AS "field2.r1.a"',
+ ),
+ ResultSetColumnType(
+ name="field2.r1.b",
+ column_name="field2.r1.b",
+ type=types.VARCHAR(),
+ is_dttm=False,
+ query_as='"field2"."r1"."b" AS "field2.r1.b"',
+ ),
+ ResultSetColumnType(
+ name="field3", column_name="field3", type=field3_type, is_dttm=False
+ ),
+ ]
+
+ _assert_columns_equal(actual, expected)
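
The expected columns above follow a simple recursive pattern: each ROW attribute becomes a dotted pseudo-column, queried with a fully quoted path aliased to the dotted name, and nested ROWs recurse. A sketch of that expansion (assuming `attr_types` is the trino ROW attribute holding (name, type) pairs; this is not the actual TrinoEngineSpec code):

```python
from trino.sqlalchemy import datatype


def expand_row(parent: str, row_type: datatype.ROW) -> list[tuple[str, object, str]]:
    expanded = []
    for name, type_ in row_type.attr_types:
        dotted = f"{parent}.{name}"
        # Quote each path segment: field2.r1.a -> "field2"."r1"."a"
        quoted = '"' + '"."'.join(dotted.split(".")) + '"'
        expanded.append((dotted, type_, f'{quoted} AS "{dotted}"'))
        if isinstance(type_, datatype.ROW):
            expanded.extend(expand_row(dotted, type_))
    return expanded
```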
diff --git a/tests/unit_tests/explore/utils_test.py b/tests/unit_tests/explore/utils_test.py
index de39187ec7f68..fa99091f09e12 100644
--- a/tests/unit_tests/explore/utils_test.py
+++ b/tests/unit_tests/explore/utils_test.py
@@ -18,20 +18,20 @@
from pytest import raises
from pytest_mock import MockFixture
-from superset.charts.commands.exceptions import (
+from superset.commands.chart.exceptions import (
ChartAccessDeniedError,
ChartNotFoundError,
)
+from superset.commands.dataset.exceptions import (
+ DatasetAccessDeniedError,
+ DatasetNotFoundError,
+)
from superset.commands.exceptions import (
DatasourceNotFoundValidationError,
DatasourceTypeInvalidError,
OwnersNotFoundValidationError,
QueryNotFoundValidationError,
)
-from superset.datasets.commands.exceptions import (
- DatasetAccessDeniedError,
- DatasetNotFoundError,
-)
from superset.exceptions import SupersetSecurityException
from superset.utils.core import DatasourceType, override_user
diff --git a/tests/unit_tests/fixtures/dataframes.py b/tests/unit_tests/fixtures/dataframes.py
index 31a275b735ac7..e1499792cba07 100644
--- a/tests/unit_tests/fixtures/dataframes.py
+++ b/tests/unit_tests/fixtures/dataframes.py
@@ -130,6 +130,11 @@
data={"label": ["x", "y", "z", "q"], "y": [1.0, 2.0, 3.0, 4.0]},
)
+timeseries_with_gap_df = DataFrame(
+ index=to_datetime(["2019-01-01", "2019-01-02", "2019-01-05", "2019-01-07"]),
+ data={"label": ["x", "y", "z", "q"], "y": [1.0, 2.0, None, 4.0]},
+)
+
timeseries_df2 = DataFrame(
index=to_datetime(["2019-01-01", "2019-01-02", "2019-01-05", "2019-01-07"]),
data={
diff --git a/tests/unit_tests/jinja_context_test.py b/tests/unit_tests/jinja_context_test.py
index 114f046300169..e2a5e8cd49280 100644
--- a/tests/unit_tests/jinja_context_test.py
+++ b/tests/unit_tests/jinja_context_test.py
@@ -22,7 +22,7 @@
from pytest_mock import MockFixture
from sqlalchemy.dialects import mysql
-from superset.datasets.commands.exceptions import DatasetNotFoundError
+from superset.commands.dataset.exceptions import DatasetNotFoundError
from superset.jinja_context import dataset_macro, WhereInMacro
diff --git a/tests/unit_tests/migrations/viz/dual_line_to_mixed_chart_test.py b/tests/unit_tests/migrations/viz/dual_line_to_mixed_chart_test.py
index 76addd8009e4b..3d9dc53122440 100644
--- a/tests/unit_tests/migrations/viz/dual_line_to_mixed_chart_test.py
+++ b/tests/unit_tests/migrations/viz/dual_line_to_mixed_chart_test.py
@@ -14,9 +14,10 @@
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
-import json
+from typing import Any
from superset.migrations.shared.migrate_viz import MigrateDualLine
+from tests.unit_tests.migrations.viz.utils import migrate_and_assert
ADHOC_FILTERS = [
{
@@ -28,7 +29,7 @@
}
]
-SOURCE_FORM_DATA = {
+SOURCE_FORM_DATA: dict[str, Any] = {
"metric": "num_boys",
"y_axis_format": ",d",
"y_axis_bounds": [50, 100],
@@ -42,7 +43,7 @@
"yAxisIndex": 0,
}
-TARGET_FORM_DATA = {
+TARGET_FORM_DATA: dict[str, Any] = {
"metrics": ["num_boys"],
"y_axis_format": ",d",
"y_axis_bounds": [50, 100],
@@ -64,34 +65,4 @@
def test_migration() -> None:
source = SOURCE_FORM_DATA.copy()
target = TARGET_FORM_DATA.copy()
- upgrade_downgrade(source, target)
-
-
-def upgrade_downgrade(source, target) -> None:
- from superset.models.slice import Slice
-
- dumped_form_data = json.dumps(source)
-
- slc = Slice(
- viz_type=MigrateDualLine.source_viz_type,
- datasource_type="table",
- params=dumped_form_data,
- query_context=f'{{"form_data": {dumped_form_data}}}',
- )
-
- # upgrade
- slc = MigrateDualLine.upgrade_slice(slc)
-
- # verify form_data
- new_form_data = json.loads(slc.params)
- assert new_form_data == target
- assert new_form_data["form_data_bak"] == source
-
- # verify query_context
- new_query_context = json.loads(slc.query_context)
- assert new_query_context["form_data"]["viz_type"] == "mixed_timeseries"
-
- # downgrade
- slc = MigrateDualLine.downgrade_slice(slc)
- assert slc.viz_type == MigrateDualLine.source_viz_type
- assert json.loads(slc.params) == source
+ migrate_and_assert(MigrateDualLine, source, target)
diff --git a/superset/examples/configs/charts/Vehicle_Sales_Filter.yaml b/tests/unit_tests/migrations/viz/nvd3_area_chart_to_echarts_test.py
similarity index 51%
rename from superset/examples/configs/charts/Vehicle_Sales_Filter.yaml
rename to tests/unit_tests/migrations/viz/nvd3_area_chart_to_echarts_test.py
index 91c8f76bb8d8f..a6b87c6d7aa3b 100644
--- a/superset/examples/configs/charts/Vehicle_Sales_Filter.yaml
+++ b/tests/unit_tests/migrations/viz/nvd3_area_chart_to_echarts_test.py
@@ -14,34 +14,29 @@
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
-slice_name: Vehicle Sales Filter
-viz_type: filter_box
-params:
- adhoc_filters: []
- datasource: 23__table
- date_filter: true
- filter_configs:
- - asc: true
- clearable: true
- column: product_line
- key: 7oUjq15eQ
- label: Product Line
- multiple: true
- searchAllOptions: false
- - asc: true
- clearable: true
- column: deal_size
- key: c3hO6Eub8
- label: Deal Size
- multiple: true
- searchAllOptions: false
- granularity_sqla: order_date
- queryFields: {}
- slice_id: 671
- time_range: '2003-01-01T00:00:00 : 2005-06-01T00:00:00'
- url_params: {}
- viz_type: filter_box
-cache_timeout: null
-uuid: a5689df7-98fc-7c51-602c-ebd92dc3ec70
-version: 1.0.0
-dataset_uuid: e8623bb9-5e00-f531-506a-19607f5f8005
+from typing import Any
+
+from superset.migrations.shared.migrate_viz import MigrateAreaChart
+from tests.unit_tests.migrations.viz.utils import (
+ migrate_and_assert,
+ TIMESERIES_SOURCE_FORM_DATA,
+ TIMESERIES_TARGET_FORM_DATA,
+)
+
+SOURCE_FORM_DATA: dict[str, Any] = {
+ "viz_type": "area",
+ "stacked_style": "stream",
+}
+
+TARGET_FORM_DATA: dict[str, Any] = {
+ "form_data_bak": SOURCE_FORM_DATA,
+ "viz_type": "echarts_area",
+ "opacity": 0.7,
+ "stack": "Stream",
+}
+
+
+def test_migration() -> None:
+ SOURCE_FORM_DATA.update(TIMESERIES_SOURCE_FORM_DATA)
+ TARGET_FORM_DATA.update(TIMESERIES_TARGET_FORM_DATA)
+ migrate_and_assert(MigrateAreaChart, SOURCE_FORM_DATA, TARGET_FORM_DATA)
diff --git a/tests/unit_tests/migrations/viz/nvd3_bubble_chart_to_echarts_test.py b/tests/unit_tests/migrations/viz/nvd3_bubble_chart_to_echarts_test.py
new file mode 100644
index 0000000000000..070083b7ae129
--- /dev/null
+++ b/tests/unit_tests/migrations/viz/nvd3_bubble_chart_to_echarts_test.py
@@ -0,0 +1,76 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+from typing import Any
+
+from superset.migrations.shared.migrate_viz import MigrateBubbleChart
+from tests.unit_tests.migrations.viz.utils import migrate_and_assert
+
+SOURCE_FORM_DATA: dict[str, Any] = {
+ "adhoc_filters": [],
+ "bottom_margin": 20,
+ "color_scheme": "default",
+ "entity": "count",
+ "left_margin": 20,
+ "limit": 100,
+ "max_bubble_size": 50,
+ "series": ["region"],
+ "show_legend": True,
+ "size": 75,
+ "viz_type": "bubble",
+ "x": "year",
+ "x_axis_format": "SMART_DATE",
+ "x_axis_label": "Year",
+ "x_axis_showminmax": True,
+ "x_log_scale": True,
+ "x_ticks_layout": "45°",
+ "y": "country",
+ "y_axis_bounds": [0, 100],
+ "y_axis_format": "SMART_DATE",
+ "y_axis_label": "Year",
+ "y_axis_showminmax": False,
+ "y_log_scale": True,
+}
+
+TARGET_FORM_DATA: dict[str, Any] = {
+ "adhoc_filters": [],
+ "color_scheme": "default",
+ "entity": "count",
+ "form_data_bak": SOURCE_FORM_DATA,
+ "logXAxis": True,
+ "logYAxis": True,
+ "max_bubble_size": 50,
+ "row_limit": 100,
+ "series": ["region"],
+ "show_legend": True,
+ "size": 75,
+ "truncateYAxis": True,
+ "viz_type": "bubble_v2",
+ "x": "year",
+ "xAxisFormat": "SMART_DATE",
+ "xAxisLabelRotation": 45,
+ "x_axis_label": "Year",
+ "x_axis_title_margin": 20,
+ "y": "country",
+ "y_axis_bounds": [0, 100],
+ "y_axis_format": "SMART_DATE",
+ "y_axis_label": "Year",
+ "y_axis_title_margin": 20,
+}
+
+
+def test_migration() -> None:
+ migrate_and_assert(MigrateBubbleChart, SOURCE_FORM_DATA, TARGET_FORM_DATA)
diff --git a/tests/unit_tests/migrations/viz/nvd3_line_chart_to_echarts_test.py b/tests/unit_tests/migrations/viz/nvd3_line_chart_to_echarts_test.py
new file mode 100644
index 0000000000000..5999a907021ba
--- /dev/null
+++ b/tests/unit_tests/migrations/viz/nvd3_line_chart_to_echarts_test.py
@@ -0,0 +1,39 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+from typing import Any
+
+from superset.migrations.shared.migrate_viz import MigrateLineChart
+from tests.unit_tests.migrations.viz.utils import (
+ migrate_and_assert,
+ TIMESERIES_SOURCE_FORM_DATA,
+ TIMESERIES_TARGET_FORM_DATA,
+)
+
+SOURCE_FORM_DATA: dict[str, Any] = {
+ "viz_type": "line",
+}
+
+TARGET_FORM_DATA: dict[str, Any] = {
+ "form_data_bak": SOURCE_FORM_DATA,
+ "viz_type": "echarts_timeseries_line",
+}
+
+
+def test_migration() -> None:
+ SOURCE_FORM_DATA.update(TIMESERIES_SOURCE_FORM_DATA)
+ TARGET_FORM_DATA.update(TIMESERIES_TARGET_FORM_DATA)
+ migrate_and_assert(MigrateLineChart, SOURCE_FORM_DATA, TARGET_FORM_DATA)
diff --git a/tests/unit_tests/migrations/viz/pivot_table_v1_v2_test.py b/tests/unit_tests/migrations/viz/pivot_table_v1_v2_test.py
index 1e2229ca83082..788fd14770e0c 100644
--- a/tests/unit_tests/migrations/viz/pivot_table_v1_v2_test.py
+++ b/tests/unit_tests/migrations/viz/pivot_table_v1_v2_test.py
@@ -14,122 +14,40 @@
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
-import json
+from typing import Any
from superset.migrations.shared.migrate_viz import MigratePivotTable
-from tests.unit_tests.conftest import with_feature_flags
+from tests.unit_tests.migrations.viz.utils import migrate_and_assert
-SOURCE_FORM_DATA = {
- "adhoc_filters": [],
+SOURCE_FORM_DATA: dict[str, Any] = {
"any_other_key": "untouched",
"columns": ["state"],
"combine_metric": True,
- "granularity_sqla": "ds",
"groupby": ["name"],
"number_format": "SMART_NUMBER",
"pandas_aggfunc": "sum",
"pivot_margins": True,
- "time_range": "100 years ago : now",
"timeseries_limit_metric": "count",
"transpose_pivot": True,
"viz_type": "pivot_table",
}
-TARGET_FORM_DATA = {
- "adhoc_filters": [],
+TARGET_FORM_DATA: dict[str, Any] = {
"any_other_key": "untouched",
"aggregateFunction": "Sum",
"colTotals": True,
"colSubTotals": True,
"combineMetric": True,
"form_data_bak": SOURCE_FORM_DATA,
- "granularity_sqla": "ds",
"groupbyColumns": ["state"],
"groupbyRows": ["name"],
"rowOrder": "value_z_to_a",
"series_limit_metric": "count",
- "time_range": "100 years ago : now",
"transposePivot": True,
"valueFormat": "SMART_NUMBER",
"viz_type": "pivot_table_v2",
}
-@with_feature_flags(GENERIC_CHART_AXES=False)
-def test_migration_without_generic_chart_axes() -> None:
- source = SOURCE_FORM_DATA.copy()
- target = TARGET_FORM_DATA.copy()
- upgrade_downgrade(source, target)
-
-
-@with_feature_flags(GENERIC_CHART_AXES=True)
-def test_migration_with_generic_chart_axes() -> None:
- source = SOURCE_FORM_DATA.copy()
- target = TARGET_FORM_DATA.copy()
- target["adhoc_filters"] = [
- {
- "clause": "WHERE",
- "comparator": "100 years ago : now",
- "expressionType": "SIMPLE",
- "operator": "TEMPORAL_RANGE",
- "subject": "ds",
- }
- ]
- target.pop("granularity_sqla")
- target.pop("time_range")
- upgrade_downgrade(source, target)
-
-
-@with_feature_flags(GENERIC_CHART_AXES=True)
-def test_custom_sql_time_column() -> None:
- source = SOURCE_FORM_DATA.copy()
- source["granularity_sqla"] = {
- "expressionType": "SQL",
- "label": "ds",
- "sqlExpression": "sum(ds)",
- }
- target = TARGET_FORM_DATA.copy()
- target["adhoc_filters"] = [
- {
- "clause": "WHERE",
- "comparator": None,
- "expressionType": "SQL",
- "operator": "TEMPORAL_RANGE",
- "sqlExpression": "sum(ds)",
- "subject": "ds",
- }
- ]
- target["form_data_bak"] = source
- target.pop("granularity_sqla")
- target.pop("time_range")
- upgrade_downgrade(source, target)
-
-
-def upgrade_downgrade(source, target) -> None:
- from superset.models.slice import Slice
-
- dumped_form_data = json.dumps(source)
-
- slc = Slice(
- viz_type=MigratePivotTable.source_viz_type,
- datasource_type="table",
- params=dumped_form_data,
- query_context=f'{{"form_data": {dumped_form_data}}}',
- )
-
- # upgrade
- slc = MigratePivotTable.upgrade_slice(slc)
-
- # verify form_data
- new_form_data = json.loads(slc.params)
- assert new_form_data == target
- assert new_form_data["form_data_bak"] == source
-
- # verify query_context
- new_query_context = json.loads(slc.query_context)
- assert new_query_context["form_data"]["viz_type"] == "pivot_table_v2"
-
- # downgrade
- slc = MigratePivotTable.downgrade_slice(slc)
- assert slc.viz_type == MigratePivotTable.source_viz_type
- assert json.loads(slc.params) == source
+def test_migration() -> None:
+ migrate_and_assert(MigratePivotTable, SOURCE_FORM_DATA, TARGET_FORM_DATA)
diff --git a/tests/unit_tests/migrations/viz/time_related_fields_test.py b/tests/unit_tests/migrations/viz/time_related_fields_test.py
new file mode 100644
index 0000000000000..06fdf611ce132
--- /dev/null
+++ b/tests/unit_tests/migrations/viz/time_related_fields_test.py
@@ -0,0 +1,89 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+from typing import Any
+
+from superset.migrations.shared.migrate_viz import MigratePivotTable
+from tests.unit_tests.conftest import with_feature_flags
+from tests.unit_tests.migrations.viz.utils import migrate_and_assert
+
+SOURCE_FORM_DATA: dict[str, Any] = {
+ "granularity_sqla": "ds",
+ "time_range": "100 years ago : now",
+ "viz_type": "pivot_table",
+}
+
+TARGET_FORM_DATA: dict[str, Any] = {
+ "form_data_bak": SOURCE_FORM_DATA,
+ "granularity_sqla": "ds",
+ "rowOrder": "value_z_to_a",
+ "time_range": "100 years ago : now",
+ "viz_type": "pivot_table_v2",
+}
+
+
+@with_feature_flags(GENERIC_CHART_AXES=False)
+def test_migration_without_generic_chart_axes() -> None:
+ source = SOURCE_FORM_DATA.copy()
+ target = TARGET_FORM_DATA.copy()
+ upgrade_downgrade(source, target)
+
+
+@with_feature_flags(GENERIC_CHART_AXES=True)
+def test_migration_with_generic_chart_axes() -> None:
+ source = SOURCE_FORM_DATA.copy()
+ target = TARGET_FORM_DATA.copy()
+ target["adhoc_filters"] = [
+ {
+ "clause": "WHERE",
+ "comparator": "100 years ago : now",
+ "expressionType": "SIMPLE",
+ "operator": "TEMPORAL_RANGE",
+ "subject": "ds",
+ }
+ ]
+ target.pop("granularity_sqla")
+ target.pop("time_range")
+ upgrade_downgrade(source, target)
+
+
+@with_feature_flags(GENERIC_CHART_AXES=True)
+def test_custom_sql_time_column() -> None:
+ source = SOURCE_FORM_DATA.copy()
+ source["granularity_sqla"] = {
+ "expressionType": "SQL",
+ "label": "ds",
+ "sqlExpression": "sum(ds)",
+ }
+ target = TARGET_FORM_DATA.copy()
+ target["adhoc_filters"] = [
+ {
+ "clause": "WHERE",
+ "comparator": None,
+ "expressionType": "SQL",
+ "operator": "TEMPORAL_RANGE",
+ "sqlExpression": "sum(ds)",
+ "subject": "ds",
+ }
+ ]
+ target["form_data_bak"] = source
+ target.pop("granularity_sqla")
+ target.pop("time_range")
+ upgrade_downgrade(source, target)
+
+
+def upgrade_downgrade(source, target) -> None:
+ migrate_and_assert(MigratePivotTable, source, target)
diff --git a/tests/unit_tests/migrations/viz/utils.py b/tests/unit_tests/migrations/viz/utils.py
new file mode 100644
index 0000000000000..9da90c853fe19
--- /dev/null
+++ b/tests/unit_tests/migrations/viz/utils.py
@@ -0,0 +1,96 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+import json
+from typing import Any
+
+from superset.migrations.shared.migrate_viz import MigrateViz
+
+TIMESERIES_SOURCE_FORM_DATA: dict[str, Any] = {
+ "bottom_margin": 20,
+ "comparison_type": "absolute",
+ "contribution": True,
+ "left_margin": 20,
+ "rich_tooltip": True,
+ "rolling_type": "sum",
+ "show_brush": "yes",
+ "show_controls": True,
+ "show_legend": True,
+ "show_markers": True,
+ "time_compare": "1 year",
+ "x_axis_label": "x",
+ "x_axis_format": "SMART_DATE",
+ "x_ticks_layout": "45°",
+ "y_axis_bounds": [0, 100],
+ "y_axis_format": "SMART_NUMBER",
+ "y_axis_label": "y",
+ "y_axis_showminmax": True,
+ "y_log_scale": True,
+}
+
+TIMESERIES_TARGET_FORM_DATA: dict[str, Any] = {
+ "comparison_type": "difference",
+ "contributionMode": "row",
+ "logAxis": True,
+ "markerEnabled": True,
+ "rich_tooltip": True,
+ "rolling_type": "sum",
+ "show_extra_controls": True,
+ "show_legend": True,
+ "time_compare": ["1 year ago"],
+ "truncateYAxis": True,
+ "x_axis_title_margin": 20,
+ "y_axis_title_margin": 20,
+ "x_axis_title": "x",
+ "x_axis_time_format": "SMART_DATE",
+ "xAxisLabelRotation": 45,
+ "y_axis_bounds": [0, 100],
+ "y_axis_format": "SMART_NUMBER",
+ "y_axis_title": "y",
+ "zoomable": True,
+}
+
+
+def migrate_and_assert(
+ cls: type[MigrateViz], source: dict[str, Any], target: dict[str, Any]
+) -> None:
+ from superset.models.slice import Slice
+
+ dumped_form_data = json.dumps(source)
+
+ slc = Slice(
+ viz_type=cls.source_viz_type,
+ datasource_type="table",
+ params=dumped_form_data,
+ query_context=f'{{"form_data": {dumped_form_data}}}',
+ )
+
+ # upgrade
+ cls.upgrade_slice(slc)
+
+ # verify form_data
+ new_form_data = json.loads(slc.params)
+ assert new_form_data == target
+ assert new_form_data["form_data_bak"] == source
+
+ # verify query_context
+ new_query_context = json.loads(slc.query_context)
+ assert new_query_context["form_data"]["viz_type"] == cls.target_viz_type
+
+ # downgrade
+ cls.downgrade_slice(slc)
+ assert slc.viz_type == cls.source_viz_type
+ assert json.loads(slc.params) == source
diff --git a/tests/unit_tests/pandas_postprocessing/test_cum.py b/tests/unit_tests/pandas_postprocessing/test_cum.py
index 130e0602520a1..25d7fd045f57b 100644
--- a/tests/unit_tests/pandas_postprocessing/test_cum.py
+++ b/tests/unit_tests/pandas_postprocessing/test_cum.py
@@ -24,6 +24,7 @@
multiple_metrics_df,
single_metric_df,
timeseries_df,
+ timeseries_with_gap_df,
)
from tests.unit_tests.pandas_postprocessing.utils import series_to_list
@@ -77,6 +78,19 @@ def test_cum():
)
+def test_cum_with_gap():
+ # create new column (cumsum)
+ post_df = pp.cum(
+ df=timeseries_with_gap_df,
+ columns={"y": "y2"},
+ operator="sum",
+ )
+ assert post_df.columns.tolist() == ["label", "y", "y2"]
+ assert series_to_list(post_df["label"]) == ["x", "y", "z", "q"]
+ assert series_to_list(post_df["y"]) == [1.0, 2.0, None, 4.0]
+ assert series_to_list(post_df["y2"]) == [1.0, 3.0, 3.0, 7.0]
+
+
def test_cum_after_pivot_with_single_metric():
pivot_df = pp.pivot(
df=single_metric_df,
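
The new fixture and test pin down how `pp.cum` treats a gap: the missing value contributes nothing, but the running total carries through it. A plain pandas `cumsum` would instead propagate the NaN, so the implementation presumably fills gaps first (a sketch of the observable behavior only, not necessarily how `pp.cum` does it):

```python
import pandas as pd

s = pd.Series([1.0, 2.0, None, 4.0])
print(s.cumsum().tolist())            # [1.0, 3.0, nan, 7.0]: the gap propagates
print(s.fillna(0).cumsum().tolist())  # [1.0, 3.0, 3.0, 7.0]: matches y2 above
```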
diff --git a/tests/unit_tests/reports/notifications/slack_tests.py b/tests/unit_tests/reports/notifications/slack_tests.py
new file mode 100644
index 0000000000000..0a5e9baa466fa
--- /dev/null
+++ b/tests/unit_tests/reports/notifications/slack_tests.py
@@ -0,0 +1,58 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+import pandas as pd
+
+
+def test_get_channel_with_multi_recipients() -> None:
+ """
+ Test that ``_get_channel`` returns a single string of recipients
+ separated by commas, with no interstitial spacing.
+ """
+ from superset.reports.models import ReportRecipients, ReportRecipientType
+ from superset.reports.notifications.base import NotificationContent
+ from superset.reports.notifications.slack import SlackNotification
+
+ content = NotificationContent(
+ name="test alert",
+ header_data={
+ "notification_format": "PNG",
+ "notification_type": "Alert",
+ "owners": [1],
+ "notification_source": None,
+ "chart_id": None,
+ "dashboard_id": None,
+ },
+ embedded_data=pd.DataFrame(
+ {
+ "A": [1, 2, 3],
+ "B": [4, 5, 6],
+ "C": ["111", "222", '333'],
+ }
+ ),
+ description="This is a test alert",
+ )
+ slack_notification = SlackNotification(
+ recipient=ReportRecipients(
+ type=ReportRecipientType.SLACK,
+ recipient_config_json='{"target": "some_channel; second_channel, third_channel"}',
+ ),
+ content=content,
+ )
+
+ result = slack_notification._get_channel()
+
+ assert result == "some_channel,second_channel,third_channel"
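
The assertion encodes the normalization under test: targets may be separated by ";" or ",", with arbitrary spacing, and come back as one comma-joined string. A standalone sketch of that normalization (hypothetical helper; the real logic lives in `SlackNotification._get_channel`):

```python
import json


def normalize_channels(recipient_config_json: str) -> str:
    # Split the configured target on ";" and ",", strip whitespace,
    # and re-join with bare commas.
    target = json.loads(recipient_config_json)["target"]
    channels = [part.strip() for part in target.replace(";", ",").split(",")]
    return ",".join(channels)


assert (
    normalize_channels('{"target": "some_channel; second_channel, third_channel"}')
    == "some_channel,second_channel,third_channel"
)
```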
diff --git a/tests/unit_tests/sql_lab_test.py b/tests/unit_tests/sql_lab_test.py
index edc1fd2ec4a5d..200ee091ec558 100644
--- a/tests/unit_tests/sql_lab_test.py
+++ b/tests/unit_tests/sql_lab_test.py
@@ -87,7 +87,7 @@ def test_execute_sql_statement_with_rls(
cursor = mocker.MagicMock()
SupersetResultSet = mocker.patch("superset.sql_lab.SupersetResultSet")
mocker.patch(
- "superset.sql_lab.insert_rls",
+ "superset.sql_lab.insert_rls_as_subquery",
return_value=sqlparse.parse("SELECT * FROM sales WHERE organization_id=42")[0],
)
mocker.patch("superset.sql_lab.is_feature_enabled", return_value=True)
@@ -112,12 +112,12 @@ def test_execute_sql_statement_with_rls(
SupersetResultSet.assert_called_with([(42,)], cursor.description, db_engine_spec)
-def test_sql_lab_insert_rls(
+def test_sql_lab_insert_rls_as_subquery(
mocker: MockerFixture,
session: Session,
) -> None:
"""
- Integration test for `insert_rls`.
+ Integration test for `insert_rls_as_subquery`.
"""
from flask_appbuilder.security.sqla.models import Role, User
@@ -213,4 +213,7 @@ def test_sql_lab_insert_rls(
| 2 | 8 |
| 3 | 9 |""".strip()
)
- assert query.executed_sql == "SELECT c FROM t WHERE (t.c > 5)\nLIMIT 6"
+ assert (
+ query.executed_sql
+ == "SELECT c FROM (SELECT * FROM t WHERE (t.c > 5)) AS t\nLIMIT 6"
+ )
diff --git a/tests/unit_tests/sql_parse_tests.py b/tests/unit_tests/sql_parse_tests.py
index 341ba9d789396..efd883810147e 100644
--- a/tests/unit_tests/sql_parse_tests.py
+++ b/tests/unit_tests/sql_parse_tests.py
@@ -14,7 +14,7 @@
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
-# pylint: disable=invalid-name, redefined-outer-name, unused-argument, protected-access, too-many-lines
+# pylint: disable=invalid-name, redefined-outer-name, too-many-lines
from typing import Optional
@@ -31,7 +31,8 @@
extract_table_references,
get_rls_for_table,
has_table_query,
- insert_rls,
+ insert_rls_as_subquery,
+ insert_rls_in_predicate,
ParsedQuery,
sanitize_clause,
strip_comments_from_sql,
@@ -1318,6 +1319,184 @@ def test_has_table_query(sql: str, expected: bool) -> None:
assert has_table_query(statement) == expected
+@pytest.mark.parametrize(
+ "sql,table,rls,expected",
+ [
+ # Basic test
+ (
+ "SELECT * FROM some_table WHERE 1=1",
+ "some_table",
+ "id=42",
+ (
+ "SELECT * FROM (SELECT * FROM some_table WHERE some_table.id=42) "
+ "AS some_table WHERE 1=1"
+ ),
+ ),
+ # Here "table" is a reserved word; since sqlparse is too aggressive when
+ # characterizing reserved words we need to support them even when not quoted.
+ (
+ "SELECT * FROM table WHERE 1=1",
+ "table",
+ "id=42",
+ "SELECT * FROM (SELECT * FROM table WHERE table.id=42) AS table WHERE 1=1",
+ ),
+ # RLS is only applied to queries reading from the associated table
+ (
+ "SELECT * FROM table WHERE 1=1",
+ "other_table",
+ "id=42",
+ "SELECT * FROM table WHERE 1=1",
+ ),
+ (
+ "SELECT * FROM other_table WHERE 1=1",
+ "table",
+ "id=42",
+ "SELECT * FROM other_table WHERE 1=1",
+ ),
+ # JOINs are supported
+ (
+ "SELECT * FROM table JOIN other_table ON table.id = other_table.id",
+ "other_table",
+ "id=42",
+ (
+ "SELECT * FROM table JOIN "
+ "(SELECT * FROM other_table WHERE other_table.id=42) AS other_table "
+ "ON table.id = other_table.id"
+ ),
+ ),
+ # Subqueries
+ (
+ "SELECT * FROM (SELECT * FROM other_table)",
+ "other_table",
+ "id=42",
+ (
+ "SELECT * FROM (SELECT * FROM ("
+ "SELECT * FROM other_table WHERE other_table.id=42"
+ ") AS other_table)"
+ ),
+ ),
+ # UNION
+ (
+ "SELECT * FROM table UNION ALL SELECT * FROM other_table",
+ "table",
+ "id=42",
+ (
+ "SELECT * FROM (SELECT * FROM table WHERE table.id=42) AS table "
+ "UNION ALL SELECT * FROM other_table"
+ ),
+ ),
+ (
+ "SELECT * FROM table UNION ALL SELECT * FROM other_table",
+ "other_table",
+ "id=42",
+ (
+ "SELECT * FROM table UNION ALL SELECT * FROM ("
+ "SELECT * FROM other_table WHERE other_table.id=42) AS other_table"
+ ),
+ ),
+ # When comparing fully qualified table names (e.g., schema.table) to simple
+ # names (e.g., table) we are also conservative, assuming the schema is the
+ # same, since we don't have information on the default schema.
+ (
+ "SELECT * FROM schema.table_name",
+ "table_name",
+ "id=42",
+ (
+ "SELECT * FROM (SELECT * FROM schema.table_name "
+ "WHERE table_name.id=42) AS table_name"
+ ),
+ ),
+ (
+ "SELECT * FROM schema.table_name",
+ "schema.table_name",
+ "id=42",
+ (
+ "SELECT * FROM (SELECT * FROM schema.table_name "
+ "WHERE schema.table_name.id=42) AS table_name"
+ ),
+ ),
+ (
+ "SELECT * FROM table_name",
+ "schema.table_name",
+ "id=42",
+ (
+ "SELECT * FROM (SELECT * FROM table_name WHERE "
+ "schema.table_name.id=42) AS table_name"
+ ),
+ ),
+ # Aliases
+ (
+ "SELECT a.*, b.* FROM tbl_a AS a INNER JOIN tbl_b AS b ON a.col = b.col",
+ "tbl_a",
+ "id=42",
+ (
+ "SELECT a.*, b.* FROM "
+ "(SELECT * FROM tbl_a WHERE tbl_a.id=42) AS a "
+ "INNER JOIN tbl_b AS b "
+ "ON a.col = b.col"
+ ),
+ ),
+ (
+ "SELECT a.*, b.* FROM tbl_a a INNER JOIN tbl_b b ON a.col = b.col",
+ "tbl_a",
+ "id=42",
+ (
+ "SELECT a.*, b.* FROM "
+ "(SELECT * FROM tbl_a WHERE tbl_a.id=42) AS a "
+ "INNER JOIN tbl_b b ON a.col = b.col"
+ ),
+ ),
+ ],
+)
+def test_insert_rls_as_subquery(
+ mocker: MockerFixture, sql: str, table: str, rls: str, expected: str
+) -> None:
+ """
+ Test inserting a table's RLS condition into a statement as a subquery.
+ """
+ condition = sqlparse.parse(rls)[0]
+ add_table_name(condition, table)
+
+ # pylint: disable=unused-argument
+ def get_rls_for_table(
+ candidate: Token,
+ database_id: int,
+ default_schema: str,
+ ) -> Optional[TokenList]:
+ """
+ Return the RLS ``condition`` if ``candidate`` matches ``table``.
+ """
+ if not isinstance(candidate, Identifier):
+ candidate = Identifier([Token(Name, candidate.value)])
+
+ candidate_table = ParsedQuery.get_table(candidate)
+ if not candidate_table:
+ return None
+ candidate_table_name = (
+ f"{candidate_table.schema}.{candidate_table.table}"
+ if candidate_table.schema
+ else candidate_table.table
+ )
+ for left, right in zip(
+ candidate_table_name.split(".")[::-1], table.split(".")[::-1]
+ ):
+ if left != right:
+ return None
+ return condition
+
+ mocker.patch("superset.sql_parse.get_rls_for_table", new=get_rls_for_table)
+
+ statement = sqlparse.parse(sql)[0]
+ assert (
+ str(
+ insert_rls_as_subquery(
+ token_list=statement, database_id=1, default_schema="my_schema"
+ )
+ ).strip()
+ == expected.strip()
+ )
+
+
@pytest.mark.parametrize(
"sql,table,rls,expected",
[
@@ -1492,7 +1671,7 @@ def test_has_table_query(sql: str, expected: bool) -> None:
),
],
)
-def test_insert_rls(
+def test_insert_rls_in_predicate(
mocker: MockerFixture, sql: str, table: str, rls: str, expected: str
) -> None:
"""
@@ -1521,7 +1700,11 @@ def get_rls_for_table(
statement = sqlparse.parse(sql)[0]
assert (
str(
- insert_rls(token_list=statement, database_id=1, default_schema="my_schema")
+ insert_rls_in_predicate(
+ token_list=statement,
+ database_id=1,
+ default_schema="my_schema",
+ )
).strip()
== expected.strip()
)
diff --git a/tests/unit_tests/tags/commands/create_test.py b/tests/unit_tests/tags/commands/create_test.py
index d4143bd4ae879..ca31e44566ecf 100644
--- a/tests/unit_tests/tags/commands/create_test.py
+++ b/tests/unit_tests/tags/commands/create_test.py
@@ -49,13 +49,13 @@ def session_with_data(session: Session):
def test_create_command_success(session_with_data: Session, mocker: MockFixture):
+ from superset.commands.tag.create import CreateCustomTagWithRelationshipsCommand
from superset.connectors.sqla.models import SqlaTable
from superset.daos.tag import TagDAO
from superset.models.dashboard import Dashboard
from superset.models.slice import Slice
from superset.models.sql_lab import Query, SavedQuery
- from superset.tags.commands.create import CreateCustomTagWithRelationshipsCommand
- from superset.tags.models import ObjectTypes, TaggedObject
+ from superset.tags.models import ObjectType, TaggedObject
# Define a list of objects to tag
query = session_with_data.query(SavedQuery).first()
@@ -69,9 +69,9 @@ def test_create_command_success(session_with_data: Session, mocker: MockFixture)
mocker.patch("superset.daos.query.SavedQueryDAO.find_by_id", return_value=query)
objects_to_tag = [
- (ObjectTypes.query, query.id),
- (ObjectTypes.chart, chart.id),
- (ObjectTypes.dashboard, dashboard.id),
+ (ObjectType.query, query.id),
+ (ObjectType.chart, chart.id),
+ (ObjectType.dashboard, dashboard.id),
]
CreateCustomTagWithRelationshipsCommand(
@@ -92,13 +92,13 @@ def test_create_command_success(session_with_data: Session, mocker: MockFixture)
def test_create_command_success_clear(session_with_data: Session, mocker: MockFixture):
+ from superset.commands.tag.create import CreateCustomTagWithRelationshipsCommand
from superset.connectors.sqla.models import SqlaTable
from superset.daos.tag import TagDAO
from superset.models.dashboard import Dashboard
from superset.models.slice import Slice
from superset.models.sql_lab import Query, SavedQuery
- from superset.tags.commands.create import CreateCustomTagWithRelationshipsCommand
- from superset.tags.models import ObjectTypes, TaggedObject
+ from superset.tags.models import ObjectType, TaggedObject
# Define a list of objects to tag
query = session_with_data.query(SavedQuery).first()
@@ -112,9 +112,9 @@ def test_create_command_success_clear(session_with_data: Session, mocker: MockFi
mocker.patch("superset.daos.query.SavedQueryDAO.find_by_id", return_value=query)
objects_to_tag = [
- (ObjectTypes.query, query.id),
- (ObjectTypes.chart, chart.id),
- (ObjectTypes.dashboard, dashboard.id),
+ (ObjectType.query, query.id),
+ (ObjectType.chart, chart.id),
+ (ObjectType.dashboard, dashboard.id),
]
CreateCustomTagWithRelationshipsCommand(
diff --git a/tests/unit_tests/tags/commands/update_test.py b/tests/unit_tests/tags/commands/update_test.py
index 84007fbb685d2..47ef16e4e7162 100644
--- a/tests/unit_tests/tags/commands/update_test.py
+++ b/tests/unit_tests/tags/commands/update_test.py
@@ -58,10 +58,10 @@ def session_with_data(session: Session):
def test_update_command_success(session_with_data: Session, mocker: MockFixture):
+ from superset.commands.tag.update import UpdateTagCommand
from superset.daos.tag import TagDAO
from superset.models.dashboard import Dashboard
- from superset.tags.commands.update import UpdateTagCommand
- from superset.tags.models import ObjectTypes, TaggedObject
+ from superset.tags.models import ObjectType, TaggedObject
dashboard = session_with_data.query(Dashboard).first()
mocker.patch(
@@ -72,7 +72,7 @@ def test_update_command_success(session_with_data: Session, mocker: MockFixture)
)
objects_to_tag = [
- (ObjectTypes.dashboard, dashboard.id),
+ (ObjectType.dashboard, dashboard.id),
]
tag_to_update = TagDAO.find_by_name("test_name")
@@ -94,12 +94,12 @@ def test_update_command_success(session_with_data: Session, mocker: MockFixture)
def test_update_command_success_duplicates(
session_with_data: Session, mocker: MockFixture
):
+ from superset.commands.tag.create import CreateCustomTagWithRelationshipsCommand
+ from superset.commands.tag.update import UpdateTagCommand
from superset.daos.tag import TagDAO
from superset.models.dashboard import Dashboard
from superset.models.slice import Slice
- from superset.tags.commands.create import CreateCustomTagWithRelationshipsCommand
- from superset.tags.commands.update import UpdateTagCommand
- from superset.tags.models import ObjectTypes, TaggedObject
+ from superset.tags.models import ObjectType, TaggedObject
dashboard = session_with_data.query(Dashboard).first()
chart = session_with_data.query(Slice).first()
@@ -113,7 +113,7 @@ def test_update_command_success_duplicates(
)
objects_to_tag = [
- (ObjectTypes.dashboard, dashboard.id),
+ (ObjectType.dashboard, dashboard.id),
]
CreateCustomTagWithRelationshipsCommand(
@@ -123,7 +123,7 @@ def test_update_command_success_duplicates(
tag_to_update = TagDAO.find_by_name("test_tag")
objects_to_tag = [
- (ObjectTypes.chart, chart.id),
+ (ObjectType.chart, chart.id),
]
changed_model = UpdateTagCommand(
tag_to_update.id,
@@ -144,18 +144,18 @@ def test_update_command_success_duplicates(
def test_update_command_failed_validation(
session_with_data: Session, mocker: MockFixture
):
+ from superset.commands.tag.create import CreateCustomTagWithRelationshipsCommand
+ from superset.commands.tag.exceptions import TagInvalidError
+ from superset.commands.tag.update import UpdateTagCommand
from superset.daos.tag import TagDAO
from superset.models.dashboard import Dashboard
from superset.models.slice import Slice
- from superset.tags.commands.create import CreateCustomTagWithRelationshipsCommand
- from superset.tags.commands.exceptions import TagInvalidError
- from superset.tags.commands.update import UpdateTagCommand
- from superset.tags.models import ObjectTypes
+ from superset.tags.models import ObjectType
dashboard = session_with_data.query(Dashboard).first()
chart = session_with_data.query(Slice).first()
objects_to_tag = [
- (ObjectTypes.chart, chart.id),
+ (ObjectType.chart, chart.id),
]
mocker.patch(
diff --git a/tests/unit_tests/tasks/test_async_queries.py b/tests/unit_tests/tasks/test_async_queries.py
index 5787bbdc8bb6b..1e14d742da619 100644
--- a/tests/unit_tests/tasks/test_async_queries.py
+++ b/tests/unit_tests/tasks/test_async_queries.py
@@ -3,7 +3,7 @@
import pytest
from flask_babel import lazy_gettext as _
-from superset.charts.commands.exceptions import ChartDataQueryFailedError
+from superset.commands.chart.exceptions import ChartDataQueryFailedError
@mock.patch("superset.tasks.async_queries.security_manager")
diff --git a/tests/unit_tests/utils/date_parser_tests.py b/tests/unit_tests/utils/date_parser_tests.py
index a2ec20901ab43..0311377237de7 100644
--- a/tests/unit_tests/utils/date_parser_tests.py
+++ b/tests/unit_tests/utils/date_parser_tests.py
@@ -22,7 +22,7 @@
import pytest
from dateutil.relativedelta import relativedelta
-from superset.charts.commands.exceptions import (
+from superset.commands.chart.exceptions import (
TimeRangeAmbiguousError,
TimeRangeParseFailError,
)
diff --git a/tests/unit_tests/utils/test_core.py b/tests/unit_tests/utils/test_core.py
index a8d5a2af29d5c..3e73637dad5b2 100644
--- a/tests/unit_tests/utils/test_core.py
+++ b/tests/unit_tests/utils/test_core.py
@@ -17,16 +17,19 @@
import os
from dataclasses import dataclass
from typing import Any, Optional
-from unittest.mock import MagicMock
+from unittest.mock import MagicMock, patch
import pandas as pd
import pytest
+from sqlalchemy import CheckConstraint, Column, Integer, MetaData, Table
from superset.exceptions import SupersetException
from superset.utils.core import (
cast_to_boolean,
check_is_safe_zip,
DateColumn,
+ generic_find_constraint_name,
+ generic_find_fk_constraint_name,
is_test,
normalize_dttm_col,
parse_boolean_string,
@@ -258,3 +261,113 @@ def test_check_if_safe_zip_hidden_bomb(app_context: None) -> None:
]
with pytest.raises(SupersetException):
check_is_safe_zip(ZipFile)
+
+
+def test_generic_constraint_name_exists():
+ # Create a mock SQLAlchemy database object
+ database_mock = MagicMock()
+
+ # Define the table name and constraint details
+ table_name = "my_table"
+ columns = {"column1", "column2"}
+ referenced_table_name = "other_table"
+ constraint_name = "my_constraint"
+
+ # Create a mock table object with the same structure
+ table_mock = MagicMock()
+ table_mock.name = table_name
+ table_mock.columns = [MagicMock(name=col) for col in columns]
+
+ # Create a mock for the referred_table with a name attribute
+ referred_table_mock = MagicMock()
+ referred_table_mock.name = referenced_table_name
+
+ # Create a mock for the foreign key constraint with a name attribute
+ foreign_key_constraint_mock = MagicMock()
+ foreign_key_constraint_mock.name = constraint_name
+ foreign_key_constraint_mock.referred_table = referred_table_mock
+ foreign_key_constraint_mock.column_keys = list(columns)
+
+ # Set the foreign key constraint mock as part of the table's constraints
+ table_mock.foreign_key_constraints = [foreign_key_constraint_mock]
+
+ # Configure the autoload behavior for the database mock
+ database_mock.metadata = MagicMock()
+ database_mock.metadata.tables = {table_name: table_mock}
+
+ # Mock the sa.Table creation with autoload
+ with patch("superset.utils.core.sa.Table") as table_creation_mock:
+ table_creation_mock.return_value = table_mock
+
+ result = generic_find_constraint_name(
+ table_name, columns, referenced_table_name, database_mock
+ )
+
+ assert result == constraint_name
+
+
+def test_generic_constraint_name_not_found():
+ # Create a mock SQLAlchemy database object
+ database_mock = MagicMock()
+
+ # Define the table name and constraint details
+ table_name = "my_table"
+ columns = {"column1", "column2"}
+ referenced_table_name = "other_table"
+ constraint_name = "my_constraint"
+
+ # Create a mock table object with the same structure but no matching constraint
+ table_mock = MagicMock()
+ table_mock.name = table_name
+ table_mock.columns = [MagicMock(name=col) for col in columns]
+ table_mock.foreign_key_constraints = []
+
+ # Configure the autoload behavior for the database mock
+ database_mock.metadata = MagicMock()
+ database_mock.metadata.tables = {table_name: table_mock}
+
+ result = generic_find_constraint_name(
+ table_name, columns, referenced_table_name, database_mock
+ )
+
+ assert result is None
+
+
+def test_generic_find_fk_constraint_exists():
+ insp_mock = MagicMock()
+ table_name = "my_table"
+ columns = {"column1", "column2"}
+ referenced_table_name = "other_table"
+ constraint_name = "my_constraint"
+
+ # Create a mock for the foreign key constraint as a dictionary
+ constraint_mock = {
+ "name": constraint_name,
+ "referred_table": referenced_table_name,
+ "referred_columns": list(columns),
+ }
+
+ # Configure the Inspector mock to return the list of foreign key constraints
+ insp_mock.get_foreign_keys.return_value = [constraint_mock]
+
+ result = generic_find_fk_constraint_name(
+ table_name, columns, referenced_table_name, insp_mock
+ )
+
+ assert result == constraint_name
+
+
+def test_generic_find_fk_constraint_none_exist():
+ insp_mock = MagicMock()
+ table_name = "my_table"
+ columns = {"column1", "column2"}
+ referenced_table_name = "other_table"
+
+ # Configure the Inspector mock to return the list of foreign key constraints
+ insp_mock.get_foreign_keys.return_value = []
+
+ result = generic_find_fk_constraint_name(
+ table_name, columns, referenced_table_name, insp_mock
+ )
+
+ assert result is None
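
Both `generic_find_fk_constraint_name` tests reduce to a scan over the inspector's foreign keys. A re-statement of that lookup under the same assumptions the mocks make (dict-shaped constraints with "name", "referred_table", and "referred_columns" keys; this is not the actual superset.utils.core code):

```python
from typing import Any, Optional


def find_fk_constraint_name(
    table: str, columns: set[str], referenced: str, insp: Any
) -> Optional[str]:
    # Inspector.get_foreign_keys returns one dict per constraint; match on
    # the referenced table and the exact set of referred columns.
    for fk in insp.get_foreign_keys(table):
        if fk["referred_table"] == referenced and set(fk["referred_columns"]) == columns:
            return fk["name"]
    return None
```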