Backend agnostic machine learning models #962

Merged 43 commits on Jan 31, 2023. The diff below shows changes from 30 of the 43 commits.

Commits
ff2c3e5  cpu/gpu_classes and tests (sarahyurick, Dec 7, 2022)
b685108  style fix (sarahyurick, Dec 7, 2022)
069caa8  edit tests (sarahyurick, Dec 7, 2022)
f2c5d87  split up tests (sarahyurick, Dec 8, 2022)
4eedef7  remove failing gpu xgb tests (sarahyurick, Dec 8, 2022)
3f64c01  Apply suggestions from code review (sarahyurick, Dec 8, 2022)
1077aa6  edit tests (sarahyurick, Dec 9, 2022)
e5a6477  style fix (sarahyurick, Dec 9, 2022)
549afef  minor style fix (sarahyurick, Dec 9, 2022)
72c37ff  ignore flake8 import errors (sarahyurick, Dec 9, 2022)
a300b9d  maybe? (sarahyurick, Dec 9, 2022)
7704ce2  fixture stuff?? (sarahyurick, Dec 9, 2022)
ab7cc08  remove fixture stuff lol (sarahyurick, Dec 9, 2022)
8269e56  skip python 3.8 (sarahyurick, Dec 9, 2022)
bbf4dc6  Merge branch 'main' into agnostic_models (sarahyurick, Dec 15, 2022)
e43710d  reorder logic (sarahyurick, Dec 15, 2022)
9f49f58  Merge branch 'main' into agnostic_models (sarahyurick, Dec 16, 2022)
331cee0  update cuml paths (sarahyurick, Dec 16, 2022)
090d5a9  Merge branch 'main' into agnostic_models (sarahyurick, Jan 18, 2023)
ebaa2f5  Apply suggestions from code review (sarahyurick, Jan 18, 2023)
88169f1  remove xfail (sarahyurick, Jan 20, 2023)
c0d37ac  Merge branch 'main' into agnostic_models (ayushdg, Jan 23, 2023)
6311a39  Merge branch 'main' into agnostic_models (ayushdg, Jan 24, 2023)
a0d6b15  Merge branch 'main' into agnostic_models (sarahyurick, Jan 25, 2023)
e3f956c  use sklearn all_estimators (sarahyurick, Jan 25, 2023)
d0d07cf  util function and unit test (sarahyurick, Jan 25, 2023)
a1a45f4  edit cpu/gpu tests (sarahyurick, Jan 25, 2023)
63abe98  minor test updates (sarahyurick, Jan 25, 2023)
66af9bd  remove sys (sarahyurick, Jan 25, 2023)
ad8bf0e  Apply suggestions from code review (sarahyurick, Jan 26, 2023)
e1ca596  gpu_timeseries fixture (sarahyurick, Jan 26, 2023)
f61131e  modify check_trained_models (sarahyurick, Jan 26, 2023)
9425286  Refactor gpu_client fixture, consolidate model tests (charlesbluca, Jan 27, 2023)
4a30c3c  Merge branch 'main' into agnostic_models (sarahyurick, Jan 27, 2023)
23022a0  add dask_cudf=None (sarahyurick, Jan 27, 2023)
c96d4e8  fix test_predict_with_limit_offset (sarahyurick, Jan 27, 2023)
bfefe83  update xgboost test (sarahyurick, Jan 27, 2023)
84cec59  add_boosting_classes (sarahyurick, Jan 30, 2023)
0721c21  Merge branch 'main' into agnostic_models (sarahyurick, Jan 30, 2023)
c293562  link to issue (sarahyurick, Jan 30, 2023)
93ff0a1  Merge branch 'main' into agnostic_models (sarahyurick, Jan 30, 2023)
4717bde  logistic regression error (sarahyurick, Jan 31, 2023)
98c42d5  fix gpu test (sarahyurick, Jan 31, 2023)
13 changes: 12 additions & 1 deletion dask_sql/physical/rel/custom/create_experiment.py
@@ -6,14 +6,18 @@

from dask_sql.datacontainer import ColumnContainer, DataContainer
from dask_sql.physical.rel.base import BaseRelPlugin
from dask_sql.utils import convert_sql_kwargs, import_class
from dask_sql.physical.utils.ml_classes import get_cpu_classes, get_gpu_classes
from dask_sql.utils import convert_sql_kwargs, import_class, is_cudf_type

if TYPE_CHECKING:
import dask_sql
from dask_sql.rust import LogicalPlan

logger = logging.getLogger(__name__)

cpu_classes = get_cpu_classes()
gpu_classes = get_gpu_classes()


class CreateExperimentPlugin(BaseRelPlugin):
"""
@@ -145,6 +149,13 @@ def convert(self, rel: "LogicalPlan", context: "dask_sql.Context") -> DataContainer:
y = training_df[target_column]

if model_class and experiment_class:
if is_cudf_type(training_df):
model_class = gpu_classes.get(model_class, model_class)
experiment_class = gpu_classes.get(experiment_class, experiment_class)
else:
model_class = cpu_classes.get(model_class, model_class)
experiment_class = cpu_classes.get(experiment_class, experiment_class)

try:
ModelClass = import_class(model_class)
except ImportError:
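The change above means that when the training data is cudf-backed, bare class names passed to CREATE EXPERIMENT are mapped to their cuML equivalents, and to scikit-learn classes otherwise. Below is a hedged sketch of the CPU path; the table, columns, and parameter grid are placeholders, and the experiment_class / tune_parameters options are part of the existing CREATE EXPERIMENT syntax rather than something introduced in this PR.

# Hedged sketch (not part of the diff): CREATE EXPERIMENT with a bare
# model_class name on a CPU-backed table. Table/column names and the
# parameter grid are made up for illustration.
import pandas as pd
import dask.dataframe as dd
from dask_sql import Context

c = Context()
df = dd.from_pandas(
    pd.DataFrame(
        {
            "sepal_length": [5.1, 4.9, 6.2, 5.9] * 25,
            "sepal_width": [3.5, 3.0, 3.4, 3.0] * 25,
            "species": [0, 0, 1, 1] * 25,
        }
    ),
    npartitions=2,
)
c.create_table("iris", df)

# 'GradientBoostingClassifier' should resolve via get_cpu_classes() here,
# while the fully qualified experiment_class falls through the mapping
# unchanged (dict.get falls back to the original string).
c.sql(
    """
    CREATE EXPERIMENT my_exp WITH (
        model_class = 'GradientBoostingClassifier',
        experiment_class = 'sklearn.model_selection.GridSearchCV',
        tune_parameters = (max_depth = ARRAY [3, 4, 16]),
        target_column = 'species'
    ) AS (
        SELECT * FROM iris
    )
    """
)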
15 changes: 12 additions & 3 deletions dask_sql/physical/rel/custom/create_model.py
@@ -6,14 +6,18 @@

from dask_sql.datacontainer import DataContainer
from dask_sql.physical.rel.base import BaseRelPlugin
from dask_sql.utils import convert_sql_kwargs, import_class
from dask_sql.physical.utils.ml_classes import get_cpu_classes, get_gpu_classes
from dask_sql.utils import convert_sql_kwargs, import_class, is_cudf_type

if TYPE_CHECKING:
import dask_sql
from dask_sql.rust import LogicalPlan

logger = logging.getLogger(__name__)

cpu_classes = get_cpu_classes()
gpu_classes = get_gpu_classes()


class CreateModelPlugin(BaseRelPlugin):
"""
@@ -130,6 +134,13 @@ def convert(self, rel: "LogicalPlan", context: "dask_sql.Context") -> DataContainer:
wrap_fit = kwargs.pop("wrap_fit", None)
fit_kwargs = kwargs.pop("fit_kwargs", {})

training_df = context.sql(select)

if is_cudf_type(training_df):
model_class = gpu_classes.get(model_class, model_class)
else:
model_class = cpu_classes.get(model_class, model_class)

try:
ModelClass = import_class(model_class)
except ImportError:
@@ -155,8 +166,6 @@ def convert(self, rel: "LogicalPlan", context: "dask_sql.Context") -> DataContainer:
else:
wrap_fit = False

training_df = context.sql(select)

if target_column:
non_target_columns = [
col for col in training_df.columns if col != target_column
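The reordered logic above fetches training_df before resolving model_class so that is_cudf_type can pick the backend, and because the lookup uses dict.get(model_class, model_class), unknown or fully qualified names pass through unchanged, so existing queries keep working. A hedged sketch of the CPU path with a hypothetical table follows; on a dask_cudf-backed table the same query should instead pick up the cuML class.

# Hedged sketch (not part of the diff): CREATE MODEL with a bare class name.
# Table/column names are made up; wrap_predict is an existing CREATE MODEL
# option, not something added in this PR.
import pandas as pd
import dask.dataframe as dd
from dask_sql import Context

c = Context()
df = dd.from_pandas(
    pd.DataFrame({"x": range(100), "y": range(100), "target": [0, 1] * 50}),
    npartitions=2,
)
c.create_table("timeseries", df)

# On this CPU-backed table, 'GradientBoostingClassifier' should resolve to
# sklearn's GradientBoostingClassifier via get_cpu_classes().
c.sql(
    """
    CREATE MODEL my_model WITH (
        model_class = 'GradientBoostingClassifier',
        wrap_predict = True,
        target_column = 'target'
    ) AS (
        SELECT x, y, target FROM timeseries
    )
    """
)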
130 changes: 130 additions & 0 deletions dask_sql/physical/utils/ml_classes.py
@@ -0,0 +1,130 @@
def get_cpu_classes():
try:
from sklearn.utils import all_estimators

cpu_classes = {
k: v.__module__ + "." + v.__qualname__ for k, v in all_estimators()
}
except ImportError:
cpu_classes = {}

# Boosting libraries
cpu_classes["LGBMModel"] = "lightgbm.LGBMModel"
cpu_classes["LGBMClassifier"] = "lightgbm.LGBMClassifier"
cpu_classes["LGBMRegressor"] = "lightgbm.LGBMRegressor"
cpu_classes["LGBMRanker"] = "lightgbm.LGBMRanker"
cpu_classes["XGBRegressor"] = "xgboost.XGBRegressor"
cpu_classes["XGBClassifier"] = "xgboost.XGBClassifier"
cpu_classes["XGBRanker"] = "xgboost.XGBRanker"
cpu_classes["XGBRFRegressor"] = "xgboost.XGBRFRegressor"
cpu_classes["XGBRFClassifier"] = "xgboost.XGBRFClassifier"
cpu_classes["DaskXGBClassifier"] = "xgboost.dask.DaskXGBClassifier"
cpu_classes["DaskXGBRegressor"] = "xgboost.dask.DaskXGBRegressor"
cpu_classes["DaskXGBRanker"] = "xgboost.dask.DaskXGBRanker"
cpu_classes["DaskXGBRFRegressor"] = "xgboost.dask.DaskXGBRFRegressor"
cpu_classes["DaskXGBRFClassifier"] = "xgboost.dask.DaskXGBRFClassifier"

return cpu_classes


def get_gpu_classes():
gpu_classes = {
# cuml.dask
"DBSCAN": "cuml.dask.cluster.dbscan.DBSCAN",
"KMeans": "cuml.dask.cluster.kmeans.KMeans",
"PCA": "cuml.dask.decomposition.pca.PCA",
"TruncatedSVD": "cuml.dask.decomposition.tsvd.TruncatedSVD",
"RandomForestClassifier": "cuml.dask.ensemble.randomforestclassifier.RandomForestClassifier",
"RandomForestRegressor": "cuml.dask.ensemble.randomforestregressor.RandomForestRegressor",
# ImportError: dask-glm >= 0.2.1.dev was not found, please install it to use multi-GPU logistic regression.
# "LogisticRegression": "cuml.dask.extended.linear_model.logistic_regression.LogisticRegression",
Review thread on the commented-out LogisticRegression mapping:

Collaborator: Would it make sense to throw a TODO or FIXME in here to track this failure? Not sure if there's an upstream issue open around this.

Collaborator (author): Sure, I opened #1015 and linked to it.

Collaborator (author): Not sure if this is something we should fix on the Dask-SQL side (see #1015 (comment)). Would it make sense to have some sort of try/except logic around this?

Collaborator: Thanks @sarahyurick 🙂 With that additional context, IMO the best option on our end for now would be to include cuml.dask.extended.linear_model.logistic_regression.LogisticRegression in the mapping and raise the ImportError directly, the idea being that users would just install dask-glm into their environment and try rerunning their query.

I suppose if we wanted to be as informative as possible, we could reraise the error with some addendum about using the non-Dask equivalent class if installing dask-glm isn't an option, though I think that message would probably make more sense to add upstream, since it would be informative to cuML users at large.

(A sketch of this reraise approach appears after this file's diff.)

"LogisticRegression": "cuml.linear_model.LogisticRegression",
"TfidfTransformer": "cuml.dask.feature_extraction.text.tfidf_transformer.TfidfTransformer",
"LinearRegression": "cuml.dask.linear_model.linear_regression.LinearRegression",
"Ridge": "cuml.dask.linear_model.ridge.Ridge",
"Lasso": "cuml.dask.linear_model.lasso.Lasso",
"ElasticNet": "cuml.dask.linear_model.elastic_net.ElasticNet",
"UMAP": "cuml.dask.manifold.umap.UMAP",
"MultinomialNB": "cuml.dask.naive_bayes.naive_bayes.MultinomialNB",
"NearestNeighbors": "cuml.dask.neighbors.nearest_neighbors.NearestNeighbors",
"KNeighborsClassifier": "cuml.dask.neighbors.kneighbors_classifier.KNeighborsClassifier",
"KNeighborsRegressor": "cuml.dask.neighbors.kneighbors_regressor.KNeighborsRegressor",
"LabelBinarizer": "cuml.dask.preprocessing.label.LabelBinarizer",
"OneHotEncoder": "cuml.dask.preprocessing.encoders.OneHotEncoder",
"LabelEncoder": "cuml.dask.preprocessing.LabelEncoder.LabelEncoder",
"CD": "cuml.dask.solvers.cd.CD",
# cuml
"Base": "cuml.internals.base.Base",
"Handle": "cuml.common.handle.Handle",
"AgglomerativeClustering": "cuml.cluster.agglomerative.AgglomerativeClustering",
"HDBSCAN": "cuml.cluster.hdbscan.HDBSCAN",
"IncrementalPCA": "cuml.decomposition.incremental_pca.IncrementalPCA",
"ForestInference": "cuml.fil.fil.ForestInference",
"KernelRidge": "cuml.kernel_ridge.kernel_ridge.KernelRidge",
"MBSGDClassifier": "cuml.linear_model.mbsgd_classifier.MBSGDClassifier",
"MBSGDRegressor": "cuml.linear_model.mbsgd_regressor.MBSGDRegressor",
"TSNE": "cuml.manifold.t_sne.TSNE",
"KernelDensity": "cuml.neighbors.kernel_density.KernelDensity",
"GaussianRandomProjection": "cuml.random_projection.random_projection.GaussianRandomProjection",
"SparseRandomProjection": "cuml.random_projection.random_projection.SparseRandomProjection",
"SGD": "cuml.solvers.sgd.SGD",
"QN": "cuml.solvers.qn.QN",
"SVC": "cuml.svm.SVC",
"SVR": "cuml.svm.SVR",
"LinearSVC": "cuml.svm.LinearSVC",
"LinearSVR": "cuml.svm.LinearSVR",
"ARIMA": "cuml.tsa.arima.ARIMA",
"AutoARIMA": "cuml.tsa.auto_arima.AutoARIMA",
"ExponentialSmoothing": "cuml.tsa.holtwinters.ExponentialSmoothing",
# sklearn
"Binarizer": "cuml.preprocessing.Binarizer",
"KernelCenterer": "cuml.preprocessing.KernelCenterer",
"MinMaxScaler": "cuml.preprocessing.MinMaxScaler",
"MaxAbsScaler": "cuml.preprocessing.MaxAbsScaler",
"Normalizer": "cuml.preprocessing.Normalizer",
"PolynomialFeatures": "cuml.preprocessing.PolynomialFeatures",
"PowerTransformer": "cuml.preprocessing.PowerTransformer",
"QuantileTransformer": "cuml.preprocessing.QuantileTransformer",
"RobustScaler": "cuml.preprocessing.RobustScaler",
"StandardScaler": "cuml.preprocessing.StandardScaler",
"SimpleImputer": "cuml.preprocessing.SimpleImputer",
"MissingIndicator": "cuml.preprocessing.MissingIndicator",
"KBinsDiscretizer": "cuml.preprocessing.KBinsDiscretizer",
"FunctionTransformer": "cuml.preprocessing.FunctionTransformer",
"ColumnTransformer": "cuml.compose.ColumnTransformer",
"GridSearchCV": "sklearn.model_selection.GridSearchCV",
"Pipeline": "sklearn.pipeline.Pipeline",
# Other
"UniversalBase": "cuml.internals.base.UniversalBase",
"Lars": "cuml.experimental.linear_model.lars.Lars",
"TfidfVectorizer": "cuml.feature_extraction._tfidf_vectorizer.TfidfVectorizer",
"CountVectorizer": "cuml.feature_extraction._vectorizers.CountVectorizer",
"HashingVectorizer": "cuml.feature_extraction._vectorizers.HashingVectorizer",
"StratifiedKFold": "cuml.model_selection._split.StratifiedKFold",
"OneVsOneClassifier": "cuml.multiclass.multiclass.OneVsOneClassifier",
"OneVsRestClassifier": "cuml.multiclass.multiclass.OneVsRestClassifier",
"MulticlassClassifier": "cuml.multiclass.multiclass.MulticlassClassifier",
"BernoulliNB": "cuml.naive_bayes.naive_bayes.BernoulliNB",
"GaussianNB": "cuml.naive_bayes.naive_bayes.GaussianNB",
"ComplementNB": "cuml.naive_bayes.naive_bayes.ComplementNB",
"CategoricalNB": "cuml.naive_bayes.naive_bayes.CategoricalNB",
"TargetEncoder": "cuml.preprocessing.TargetEncoder",
"PorterStemmer": "cuml.preprocessing.text.stem.porter_stemmer.PorterStemmer",
# Boosting libraries
"LGBMModel": "lightgbm.LGBMModel",
"LGBMClassifier": "lightgbm.LGBMClassifier",
"LGBMRegressor": "lightgbm.LGBMRegressor",
"LGBMRanker": "lightgbm.LGBMRanker",
"XGBRegressor": "xgboost.XGBRegressor",
"XGBClassifier": "xgboost.XGBClassifier",
"XGBRanker": "xgboost.XGBRanker",
"XGBRFRegressor": "xgboost.XGBRFRegressor",
"XGBRFClassifier": "xgboost.XGBRFClassifier",
"DaskXGBClassifier": "xgboost.dask.DaskXGBClassifier",
"DaskXGBRegressor": "xgboost.dask.DaskXGBRegressor",
"DaskXGBRanker": "xgboost.dask.DaskXGBRanker",
"DaskXGBRFRegressor": "xgboost.dask.DaskXGBRFRegressor",
"DaskXGBRFClassifier": "xgboost.dask.DaskXGBRFClassifier",
}

return gpu_classes
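
For reference, a small sketch of how these helpers are meant to be consumed together with import_class. Everything here comes from the code in this PR; the printed CPU path is only illustrative, since the CPU mapping is generated from sklearn's all_estimators() and may point at private module names.

# Hedged sketch: resolving a short name to a backend-specific class path.
from dask_sql.physical.utils.ml_classes import get_cpu_classes, get_gpu_classes
from dask_sql.utils import import_class

cpu_classes = get_cpu_classes()
gpu_classes = get_gpu_classes()

name = "LinearRegression"
print(cpu_classes.get(name, name))  # e.g. "sklearn.linear_model._base.LinearRegression"
print(gpu_classes.get(name, name))  # "cuml.dask.linear_model.linear_regression.LinearRegression"

# Unknown or fully qualified names fall through unchanged, so queries that
# already pass "sklearn.linear_model.LinearRegression" keep working.
ModelClass = import_class(cpu_classes.get(name, name))
model = ModelClass()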
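
On the review discussion above about the multi-GPU LogisticRegression: below is a hedged sketch of the reraise approach suggested there, i.e. keeping the cuml.dask mapping and surfacing a clearer ImportError when dask-glm is missing. The helper name and error message are made up for illustration; the PR as written maps "LogisticRegression" to the single-GPU cuml.linear_model.LogisticRegression instead.

# Illustrative only: the try/except approach floated in the review thread.
def resolve_multi_gpu_logistic_regression():
    try:
        from cuml.dask.extended.linear_model.logistic_regression import (
            LogisticRegression,
        )
    except ImportError as exc:
        # Reraise with a hint, as suggested in the review discussion.
        raise ImportError(
            "Multi-GPU LogisticRegression requires dask-glm >= 0.2.1.dev. "
            "Install dask-glm and rerun the query, or use the single-GPU "
            "class cuml.linear_model.LogisticRegression instead."
        ) from exc
    return LogisticRegression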