From 8589518ee9db1b0e9aab3ce6d871bde964c44b89 Mon Sep 17 00:00:00 2001 From: liyuan Date: Thu, 19 Dec 2024 14:09:51 +0800 Subject: [PATCH] update for 2412 release Signed-off-by: liyuan --- .../xgboost-examples/csp/databricks/databricks.md | 4 ++-- docs/get-started/xgboost-examples/csp/databricks/init.sh | 2 +- .../xgboost-examples/on-prem-cluster/kubernetes-scala.md | 2 +- .../prepare-package-data/preparation-python.md | 2 +- .../prepare-package-data/preparation-scala.md | 2 +- examples/ML+DL-Examples/Optuna-Spark/README.md | 4 ++-- .../Optuna-Spark/optuna-examples/databricks/init_optuna.sh | 4 ++-- .../optuna-examples/databricks/start_cluster.sh | 2 +- .../Optuna-Spark/optuna-examples/optuna-dataframe.ipynb | 4 ++-- examples/ML+DL-Examples/Spark-Rapids-ML/pca/README.md | 2 +- .../ML+DL-Examples/Spark-Rapids-ML/pca/notebooks/pca.ipynb | 6 +++--- .../micro-benchmarks/notebooks/micro-benchmarks-gpu.ipynb | 2 +- examples/SQL+DF-Examples/tpcds/README.md | 2 +- examples/SQL+DF-Examples/tpcds/notebooks/TPCDS-SF10.ipynb | 2 +- examples/UDF-Examples/RAPIDS-accelerated-UDFs/README.md | 2 +- examples/UDF-Examples/RAPIDS-accelerated-UDFs/pom.xml | 2 +- .../agaricus/notebooks/python/agaricus-gpu.ipynb | 2 +- .../mortgage/notebooks/python/MortgageETL.ipynb | 6 +++--- .../mortgage/notebooks/python/cv-mortgage-gpu.ipynb | 2 +- .../mortgage/notebooks/python/mortgage-gpu.ipynb | 2 +- .../mortgage/notebooks/scala/mortgage-ETL.ipynb | 4 ++-- .../taxi/notebooks/python/cv-taxi-gpu.ipynb | 2 +- .../XGBoost-Examples/taxi/notebooks/python/taxi-ETL.ipynb | 4 ++-- .../XGBoost-Examples/taxi/notebooks/python/taxi-gpu.ipynb | 4 ++-- .../XGBoost-Examples/taxi/notebooks/scala/taxi-ETL.ipynb | 4 ++-- tools/databricks/README.md | 2 +- ...for Apache Spark] Profiling Tool Notebook Template.ipynb | 2 +- ...Apache Spark] Qualification Tool Notebook Template.ipynb | 2 +- 28 files changed, 40 insertions(+), 40 deletions(-) diff --git a/docs/get-started/xgboost-examples/csp/databricks/databricks.md 
b/docs/get-started/xgboost-examples/csp/databricks/databricks.md index 8bb7fe7f..400e0166 100644 --- a/docs/get-started/xgboost-examples/csp/databricks/databricks.md +++ b/docs/get-started/xgboost-examples/csp/databricks/databricks.md @@ -21,7 +21,7 @@ Navigate to your home directory in the UI and select **Create** > **File** from create an `init.sh` scripts with contents: ```bash #!/bin/bash - sudo wget -O /databricks/jars/rapids-4-spark_2.12-24.10.0.jar https://repo1.maven.org/maven2/com/nvidia/rapids-4-spark_2.12/24.10.0/rapids-4-spark_2.12-24.10.0.jar + sudo wget -O /databricks/jars/rapids-4-spark_2.12-24.12.0.jar https://repo1.maven.org/maven2/com/nvidia/rapids-4-spark_2.12/24.12.0/rapids-4-spark_2.12-24.12.0.jar ``` 1. Select the Databricks Runtime Version from one of the supported runtimes specified in the Prerequisites section. @@ -68,7 +68,7 @@ create an `init.sh` scripts with contents: ```bash spark.rapids.sql.python.gpu.enabled true spark.python.daemon.module rapids.daemon_databricks - spark.executorEnv.PYTHONPATH /databricks/jars/rapids-4-spark_2.12-24.10.0.jar:/databricks/spark/python + spark.executorEnv.PYTHONPATH /databricks/jars/rapids-4-spark_2.12-24.12.0.jar:/databricks/spark/python ``` Note that since python memory pool require installing the cudf library, so you need to install cudf library in each worker nodes `pip install cudf-cu11 --extra-index-url=https://pypi.nvidia.com` or disable python memory pool diff --git a/docs/get-started/xgboost-examples/csp/databricks/init.sh b/docs/get-started/xgboost-examples/csp/databricks/init.sh index fc415b2d..b7e05622 100644 --- a/docs/get-started/xgboost-examples/csp/databricks/init.sh +++ b/docs/get-started/xgboost-examples/csp/databricks/init.sh @@ -1,7 +1,7 @@ sudo rm -f /databricks/jars/spark--maven-trees--ml--10.x--xgboost-gpu--ml.dmlc--xgboost4j-gpu_2.12--ml.dmlc__xgboost4j-gpu_2.12__1.5.2.jar sudo rm -f 
/databricks/jars/spark--maven-trees--ml--10.x--xgboost-gpu--ml.dmlc--xgboost4j-spark-gpu_2.12--ml.dmlc__xgboost4j-spark-gpu_2.12__1.5.2.jar -sudo wget -O /databricks/jars/rapids-4-spark_2.12-24.10.0.jar https://repo1.maven.org/maven2/com/nvidia/rapids-4-spark_2.12/24.10.0/rapids-4-spark_2.12-24.10.0.jar +sudo wget -O /databricks/jars/rapids-4-spark_2.12-24.12.0.jar https://repo1.maven.org/maven2/com/nvidia/rapids-4-spark_2.12/24.12.0/rapids-4-spark_2.12-24.12.0.jar sudo wget -O /databricks/jars/xgboost4j-gpu_2.12-1.7.1.jar https://repo1.maven.org/maven2/ml/dmlc/xgboost4j-gpu_2.12/1.7.1/xgboost4j-gpu_2.12-1.7.1.jar sudo wget -O /databricks/jars/xgboost4j-spark-gpu_2.12-1.7.1.jar https://repo1.maven.org/maven2/ml/dmlc/xgboost4j-spark-gpu_2.12/1.7.1/xgboost4j-spark-gpu_2.12-1.7.1.jar ls -ltr diff --git a/docs/get-started/xgboost-examples/on-prem-cluster/kubernetes-scala.md b/docs/get-started/xgboost-examples/on-prem-cluster/kubernetes-scala.md index a0b5fe30..3c007a36 100644 --- a/docs/get-started/xgboost-examples/on-prem-cluster/kubernetes-scala.md +++ b/docs/get-started/xgboost-examples/on-prem-cluster/kubernetes-scala.md @@ -40,7 +40,7 @@ export SPARK_DOCKER_IMAGE= export SPARK_DOCKER_TAG= pushd ${SPARK_HOME} -wget https://github.com/NVIDIA/spark-rapids-examples/raw/branch-24.10/dockerfile/Dockerfile +wget https://github.com/NVIDIA/spark-rapids-examples/raw/branch-24.12/dockerfile/Dockerfile # Optionally install additional jars into ${SPARK_HOME}/jars/ diff --git a/docs/get-started/xgboost-examples/prepare-package-data/preparation-python.md b/docs/get-started/xgboost-examples/prepare-package-data/preparation-python.md index d8534cb1..b43b3801 100644 --- a/docs/get-started/xgboost-examples/prepare-package-data/preparation-python.md +++ b/docs/get-started/xgboost-examples/prepare-package-data/preparation-python.md @@ -5,7 +5,7 @@ For simplicity export the location to these jars. 
All examples assume the packag ### Download the jars Download the RAPIDS Accelerator for Apache Spark plugin jar - * [RAPIDS Spark Package](https://repo1.maven.org/maven2/com/nvidia/rapids-4-spark_2.12/24.10.0/rapids-4-spark_2.12-24.10.0.jar) + * [RAPIDS Spark Package](https://repo1.maven.org/maven2/com/nvidia/rapids-4-spark_2.12/24.12.0/rapids-4-spark_2.12-24.12.0.jar) ### Build XGBoost Python Examples diff --git a/docs/get-started/xgboost-examples/prepare-package-data/preparation-scala.md b/docs/get-started/xgboost-examples/prepare-package-data/preparation-scala.md index 8cbfa0db..582cc06e 100644 --- a/docs/get-started/xgboost-examples/prepare-package-data/preparation-scala.md +++ b/docs/get-started/xgboost-examples/prepare-package-data/preparation-scala.md @@ -5,7 +5,7 @@ For simplicity export the location to these jars. All examples assume the packag ### Download the jars 1. Download the RAPIDS Accelerator for Apache Spark plugin jar - * [RAPIDS Spark Package](https://repo1.maven.org/maven2/com/nvidia/rapids-4-spark_2.12/24.10.0/rapids-4-spark_2.12-24.10.0.jar) + * [RAPIDS Spark Package](https://repo1.maven.org/maven2/com/nvidia/rapids-4-spark_2.12/24.12.0/rapids-4-spark_2.12-24.12.0.jar) ### Build XGBoost Scala Examples diff --git a/examples/ML+DL-Examples/Optuna-Spark/README.md b/examples/ML+DL-Examples/Optuna-Spark/README.md index 4c7e460b..7681d375 100644 --- a/examples/ML+DL-Examples/Optuna-Spark/README.md +++ b/examples/ML+DL-Examples/Optuna-Spark/README.md @@ -147,8 +147,8 @@ We use [RAPIDS](https://docs.rapids.ai/install/#get-rapids) for GPU-accelerated ``` shell sudo apt install libmysqlclient-dev -conda create -n rapids-24.10 -c rapidsai -c conda-forge -c nvidia \ - cudf=24.10 cuml=24.10 python=3.10 'cuda-version>=12.0,<=12.5' +conda create -n rapids-24.12 -c rapidsai -c conda-forge -c nvidia \ + cudf=24.12 cuml=24.12 python=3.10 'cuda-version>=12.0,<=12.5' conda activate optuna-spark pip install mysqlclient pip install optuna joblib joblibspark 
ipywidgets diff --git a/examples/ML+DL-Examples/Optuna-Spark/optuna-examples/databricks/init_optuna.sh b/examples/ML+DL-Examples/Optuna-Spark/optuna-examples/databricks/init_optuna.sh index 820022af..191e1248 100644 --- a/examples/ML+DL-Examples/Optuna-Spark/optuna-examples/databricks/init_optuna.sh +++ b/examples/ML+DL-Examples/Optuna-Spark/optuna-examples/databricks/init_optuna.sh @@ -41,7 +41,7 @@ fi # rapids import -SPARK_RAPIDS_VERSION=24.10.1 +SPARK_RAPIDS_VERSION=24.12.0 curl -L https://repo1.maven.org/maven2/com/nvidia/rapids-4-spark_2.12/${SPARK_RAPIDS_VERSION}/rapids-4-spark_2.12-${SPARK_RAPIDS_VERSION}.jar -o \ /databricks/jars/rapids-4-spark_2.12-${SPARK_RAPIDS_VERSION}.jar @@ -54,7 +54,7 @@ ln -s /usr/local/cuda-11.8 /usr/local/cuda sudo /databricks/python3/bin/pip3 install \ --extra-index-url=https://pypi.nvidia.com \ - "cudf-cu11==24.10.*" "cuml-cu11==24.10.*" + "cudf-cu11==24.12.*" "cuml-cu11==24.12.*" # setup python environment sudo apt clean && sudo apt update --fix-missing -y diff --git a/examples/ML+DL-Examples/Optuna-Spark/optuna-examples/databricks/start_cluster.sh b/examples/ML+DL-Examples/Optuna-Spark/optuna-examples/databricks/start_cluster.sh index 998ad7e3..1290ef32 100755 --- a/examples/ML+DL-Examples/Optuna-Spark/optuna-examples/databricks/start_cluster.sh +++ b/examples/ML+DL-Examples/Optuna-Spark/optuna-examples/databricks/start_cluster.sh @@ -12,7 +12,7 @@ json_config=$(cat < Here is the bar chart from a recent execution on Google Colab's T4 High RAM instance using -RAPIDS Spark 24.10.0 with Apache Spark 3.5.0 +RAPIDS Spark 24.12.0 with Apache Spark 3.5.0 ![tpcds-speedup](/docs/img/guides/tpcds.png) diff --git a/examples/SQL+DF-Examples/tpcds/notebooks/TPCDS-SF10.ipynb b/examples/SQL+DF-Examples/tpcds/notebooks/TPCDS-SF10.ipynb index 87a64a07..a8c19f3e 100644 --- a/examples/SQL+DF-Examples/tpcds/notebooks/TPCDS-SF10.ipynb +++ b/examples/SQL+DF-Examples/tpcds/notebooks/TPCDS-SF10.ipynb @@ -30,7 +30,7 @@ "outputs": [], "source": [ 
"spark_version='3.5.0'\n", - "rapids_version='24.10.0'" + "rapids_version='24.12.0'" ] }, { diff --git a/examples/UDF-Examples/RAPIDS-accelerated-UDFs/README.md b/examples/UDF-Examples/RAPIDS-accelerated-UDFs/README.md index 67d9a618..fc75cc71 100644 --- a/examples/UDF-Examples/RAPIDS-accelerated-UDFs/README.md +++ b/examples/UDF-Examples/RAPIDS-accelerated-UDFs/README.md @@ -186,7 +186,7 @@ then do the following inside the Docker container. ### Get jars from Maven Central -[rapids-4-spark_2.12-24.10.0.jar](https://repo1.maven.org/maven2/com/nvidia/rapids-4-spark_2.12/24.10.0/rapids-4-spark_2.12-24.10.0.jar) +[rapids-4-spark_2.12-24.12.0.jar](https://repo1.maven.org/maven2/com/nvidia/rapids-4-spark_2.12/24.12.0/rapids-4-spark_2.12-24.12.0.jar) ### Launch a local mode Spark diff --git a/examples/UDF-Examples/RAPIDS-accelerated-UDFs/pom.xml b/examples/UDF-Examples/RAPIDS-accelerated-UDFs/pom.xml index 09829d80..6edaad4c 100644 --- a/examples/UDF-Examples/RAPIDS-accelerated-UDFs/pom.xml +++ b/examples/UDF-Examples/RAPIDS-accelerated-UDFs/pom.xml @@ -37,7 +37,7 @@ cuda11 2.12 - 24.10.0 + 24.12.0 3.1.1 2.12.15 ${project.build.directory}/cpp-build diff --git a/examples/XGBoost-Examples/agaricus/notebooks/python/agaricus-gpu.ipynb b/examples/XGBoost-Examples/agaricus/notebooks/python/agaricus-gpu.ipynb index d997ca12..f92af8e5 100644 --- a/examples/XGBoost-Examples/agaricus/notebooks/python/agaricus-gpu.ipynb +++ b/examples/XGBoost-Examples/agaricus/notebooks/python/agaricus-gpu.ipynb @@ -73,7 +73,7 @@ "Setting default log level to \"WARN\".\n", "To adjust logging level use sc.setLogLevel(newLevel). For SparkR, use setLogLevel(newLevel).\n", "2022-11-30 06:57:40,550 WARN resource.ResourceUtils: The configuration of cores (exec = 2 task = 1, runnable tasks = 2) will result in wasted resources due to resource gpu limiting the number of runnable tasks per executor to: 1. 
Please adjust your configuration.\n", - "2022-11-30 06:57:54,195 WARN rapids.RapidsPluginUtils: RAPIDS Accelerator 24.10.0 using cudf 24.10.0.\n", + "2022-11-30 06:57:54,195 WARN rapids.RapidsPluginUtils: RAPIDS Accelerator 24.12.0 using cudf 24.12.0.\n", "2022-11-30 06:57:54,210 WARN rapids.RapidsPluginUtils: spark.rapids.sql.multiThreadedRead.numThreads is set to 20.\n", "2022-11-30 06:57:54,214 WARN rapids.RapidsPluginUtils: RAPIDS Accelerator is enabled, to disable GPU support set `spark.rapids.sql.enabled` to false.\n", "2022-11-30 06:57:54,214 WARN rapids.RapidsPluginUtils: spark.rapids.sql.explain is set to `NOT_ON_GPU`. Set it to 'NONE' to suppress the diagnostics logging about the query placement on the GPU.\n", diff --git a/examples/XGBoost-Examples/mortgage/notebooks/python/MortgageETL.ipynb b/examples/XGBoost-Examples/mortgage/notebooks/python/MortgageETL.ipynb index edb88183..9d7767cd 100644 --- a/examples/XGBoost-Examples/mortgage/notebooks/python/MortgageETL.ipynb +++ b/examples/XGBoost-Examples/mortgage/notebooks/python/MortgageETL.ipynb @@ -6,10 +6,10 @@ "source": [ "## Prerequirement\n", "### 1. Download data\n", - "Dataset is derived from Fannie Mae’s [Single-Family Loan Performance Data](http://www.fanniemae.com/portal/funding-the-market/data/loan-performance-data.html) with all rights reserved by Fannie Mae. Refer to these [instructions](https://github.com/NVIDIA/spark-rapids-examples/blob/branch-24.10/docs/get-started/xgboost-examples/dataset/mortgage.md) to download the dataset.\n", + "Dataset is derived from Fannie Mae’s [Single-Family Loan Performance Data](http://www.fanniemae.com/portal/funding-the-market/data/loan-performance-data.html) with all rights reserved by Fannie Mae. Refer to these [instructions](https://github.com/NVIDIA/spark-rapids-examples/blob/branch-24.12/docs/get-started/xgboost-examples/dataset/mortgage.md) to download the dataset.\n", "\n", "### 2. 
Download needed jars\n", - "* [rapids-4-spark_2.12-24.10.0.jar](https://repo1.maven.org/maven2/com/nvidia/rapids-4-spark_2.12/24.10.0/rapids-4-spark_2.12-24.10.0.jar)\n", + "* [rapids-4-spark_2.12-24.12.0.jar](https://repo1.maven.org/maven2/com/nvidia/rapids-4-spark_2.12/24.12.0/rapids-4-spark_2.12-24.12.0.jar)\n", "\n", "\n", "### 3. Start Spark Standalone\n", @@ -17,7 +17,7 @@ "\n", "### 4. Add ENV\n", "```\n", - "$ export SPARK_JARS=rapids-4-spark_2.12-24.10.0.jar\n", + "$ export SPARK_JARS=rapids-4-spark_2.12-24.12.0.jar\n", "$ export PYSPARK_DRIVER_PYTHON=jupyter \n", "$ export PYSPARK_DRIVER_PYTHON_OPTS=notebook\n", "```\n", diff --git a/examples/XGBoost-Examples/mortgage/notebooks/python/cv-mortgage-gpu.ipynb b/examples/XGBoost-Examples/mortgage/notebooks/python/cv-mortgage-gpu.ipynb index 663efeef..3e441712 100644 --- a/examples/XGBoost-Examples/mortgage/notebooks/python/cv-mortgage-gpu.ipynb +++ b/examples/XGBoost-Examples/mortgage/notebooks/python/cv-mortgage-gpu.ipynb @@ -63,7 +63,7 @@ "Setting default log level to \"WARN\".\n", "To adjust logging level use sc.setLogLevel(newLevel). For SparkR, use setLogLevel(newLevel).\n", "2022-11-25 09:34:43,952 WARN resource.ResourceUtils: The configuration of cores (exec = 4 task = 1, runnable tasks = 4) will result in wasted resources due to resource gpu limiting the number of runnable tasks per executor to: 1. 
Please adjust your configuration.\n", - "2022-11-25 09:34:58,155 WARN rapids.RapidsPluginUtils: RAPIDS Accelerator 24.10.0 using cudf 24.10.0.\n", + "2022-11-25 09:34:58,155 WARN rapids.RapidsPluginUtils: RAPIDS Accelerator 24.12.0 using cudf 24.12.0.\n", "2022-11-25 09:34:58,171 WARN rapids.RapidsPluginUtils: spark.rapids.sql.multiThreadedRead.numThreads is set to 20.\n", "2022-11-25 09:34:58,175 WARN rapids.RapidsPluginUtils: RAPIDS Accelerator is enabled, to disable GPU support set `spark.rapids.sql.enabled` to false.\n", "2022-11-25 09:34:58,175 WARN rapids.RapidsPluginUtils: spark.rapids.sql.explain is set to `NOT_ON_GPU`. Set it to 'NONE' to suppress the diagnostics logging about the query placement on the GPU.\n" diff --git a/examples/XGBoost-Examples/mortgage/notebooks/python/mortgage-gpu.ipynb b/examples/XGBoost-Examples/mortgage/notebooks/python/mortgage-gpu.ipynb index e0b2ba33..e64ba9e0 100644 --- a/examples/XGBoost-Examples/mortgage/notebooks/python/mortgage-gpu.ipynb +++ b/examples/XGBoost-Examples/mortgage/notebooks/python/mortgage-gpu.ipynb @@ -84,7 +84,7 @@ "22/11/24 06:14:06 INFO org.apache.spark.SparkEnv: Registering BlockManagerMaster\n", "22/11/24 06:14:06 INFO org.apache.spark.SparkEnv: Registering BlockManagerMasterHeartbeat\n", "22/11/24 06:14:06 INFO org.apache.spark.SparkEnv: Registering OutputCommitCoordinator\n", - "22/11/24 06:14:07 WARN com.nvidia.spark.rapids.RapidsPluginUtils: RAPIDS Accelerator 24.10.0 using cudf 24.10.0.\n", + "22/11/24 06:14:07 WARN com.nvidia.spark.rapids.RapidsPluginUtils: RAPIDS Accelerator 24.12.0 using cudf 24.12.0.\n", "22/11/24 06:14:07 WARN com.nvidia.spark.rapids.RapidsPluginUtils: spark.rapids.sql.multiThreadedRead.numThreads is set to 20.\n", "22/11/24 06:14:07 WARN com.nvidia.spark.rapids.RapidsPluginUtils: RAPIDS Accelerator is enabled, to disable GPU support set `spark.rapids.sql.enabled` to false.\n", "22/11/24 06:14:07 WARN com.nvidia.spark.rapids.RapidsPluginUtils: spark.rapids.sql.explain is set 
to `NOT_ON_GPU`. Set it to 'NONE' to suppress the diagnostics logging about the query placement on the GPU.\n" diff --git a/examples/XGBoost-Examples/mortgage/notebooks/scala/mortgage-ETL.ipynb b/examples/XGBoost-Examples/mortgage/notebooks/scala/mortgage-ETL.ipynb index a55d0787..b551df7a 100644 --- a/examples/XGBoost-Examples/mortgage/notebooks/scala/mortgage-ETL.ipynb +++ b/examples/XGBoost-Examples/mortgage/notebooks/scala/mortgage-ETL.ipynb @@ -20,14 +20,14 @@ "Refer to these [instructions](https://github.com/NVIDIA/spark-rapids-examples/blob/branch-23.12/docs/get-started/xgboost-examples/dataset/mortgage.md) to download the dataset.\n", "\n", "### 2. Download needed jars\n", - "* [rapids-4-spark_2.12-24.10.0.jar](https://repo1.maven.org/maven2/com/nvidia/rapids-4-spark_2.12/24.10.0/rapids-4-spark_2.12-24.10.0.jar)\n", + "* [rapids-4-spark_2.12-24.12.0.jar](https://repo1.maven.org/maven2/com/nvidia/rapids-4-spark_2.12/24.12.0/rapids-4-spark_2.12-24.12.0.jar)\n", "\n", "### 3. Start Spark Standalone\n", "Before Running the script, please setup Spark standalone mode\n", "\n", "### 4. Add ENV\n", "```\n", - "$ export SPARK_JARS=rapids-4-spark_2.12-24.10.0.jar\n", + "$ export SPARK_JARS=rapids-4-spark_2.12-24.12.0.jar\n", "\n", "```\n", "\n", diff --git a/examples/XGBoost-Examples/taxi/notebooks/python/cv-taxi-gpu.ipynb b/examples/XGBoost-Examples/taxi/notebooks/python/cv-taxi-gpu.ipynb index 5a45fe46..e0e1372e 100644 --- a/examples/XGBoost-Examples/taxi/notebooks/python/cv-taxi-gpu.ipynb +++ b/examples/XGBoost-Examples/taxi/notebooks/python/cv-taxi-gpu.ipynb @@ -62,7 +62,7 @@ "Setting default log level to \"WARN\".\n", "To adjust logging level use sc.setLogLevel(newLevel). For SparkR, use setLogLevel(newLevel).\n", "2022-11-30 08:02:10,103 WARN resource.ResourceUtils: The configuration of cores (exec = 2 task = 1, runnable tasks = 2) will result in wasted resources due to resource gpu limiting the number of runnable tasks per executor to: 1. 
Please adjust your configuration.\n", - "2022-11-30 08:02:23,737 WARN rapids.RapidsPluginUtils: RAPIDS Accelerator 24.10.0 using cudf 24.10.0.\n", + "2022-11-30 08:02:23,737 WARN rapids.RapidsPluginUtils: RAPIDS Accelerator 24.12.0 using cudf 24.12.0.\n", "2022-11-30 08:02:23,752 WARN rapids.RapidsPluginUtils: spark.rapids.sql.multiThreadedRead.numThreads is set to 20.\n", "2022-11-30 08:02:23,756 WARN rapids.RapidsPluginUtils: RAPIDS Accelerator is enabled, to disable GPU support set `spark.rapids.sql.enabled` to false.\n", "2022-11-30 08:02:23,757 WARN rapids.RapidsPluginUtils: spark.rapids.sql.explain is set to `NOT_ON_GPU`. Set it to 'NONE' to suppress the diagnostics logging about the query placement on the GPU.\n", diff --git a/examples/XGBoost-Examples/taxi/notebooks/python/taxi-ETL.ipynb b/examples/XGBoost-Examples/taxi/notebooks/python/taxi-ETL.ipynb index f10937fe..fda40075 100644 --- a/examples/XGBoost-Examples/taxi/notebooks/python/taxi-ETL.ipynb +++ b/examples/XGBoost-Examples/taxi/notebooks/python/taxi-ETL.ipynb @@ -19,14 +19,14 @@ "All data could be found at https://www1.nyc.gov/site/tlc/about/tlc-trip-record-data.page\n", "\n", "### 2. Download needed jars\n", - "* [rapids-4-spark_2.12-24.10.0.jar](https://repo1.maven.org/maven2/com/nvidia/rapids-4-spark_2.12/24.10.0/rapids-4-spark_2.12-24.10.0.jar)\n", + "* [rapids-4-spark_2.12-24.12.0.jar](https://repo1.maven.org/maven2/com/nvidia/rapids-4-spark_2.12/24.12.0/rapids-4-spark_2.12-24.12.0.jar)\n", "\n", "### 3. Start Spark Standalone\n", "Before running the script, please setup Spark standalone mode\n", "\n", "### 4. 
Add ENV\n", "```\n", - "$ export SPARK_JARS=rapids-4-spark_2.12-24.10.0.jar\n", + "$ export SPARK_JARS=rapids-4-spark_2.12-24.12.0.jar\n", "$ export PYSPARK_DRIVER_PYTHON=jupyter \n", "$ export PYSPARK_DRIVER_PYTHON_OPTS=notebook\n", "```\n", diff --git a/examples/XGBoost-Examples/taxi/notebooks/python/taxi-gpu.ipynb b/examples/XGBoost-Examples/taxi/notebooks/python/taxi-gpu.ipynb index 1a1eb829..6903547c 100644 --- a/examples/XGBoost-Examples/taxi/notebooks/python/taxi-gpu.ipynb +++ b/examples/XGBoost-Examples/taxi/notebooks/python/taxi-gpu.ipynb @@ -73,7 +73,7 @@ "Setting default log level to \"WARN\".\n", "To adjust logging level use sc.setLogLevel(newLevel). For SparkR, use setLogLevel(newLevel).\n", "2022-11-30 07:51:19,480 WARN resource.ResourceUtils: The configuration of cores (exec = 2 task = 1, runnable tasks = 2) will result in wasted resources due to resource gpu limiting the number of runnable tasks per executor to: 1. Please adjust your configuration.\n", - "2022-11-30 07:51:33,277 WARN rapids.RapidsPluginUtils: RAPIDS Accelerator 24.10.0 using cudf 24.10.0.\n", + "2022-11-30 07:51:33,277 WARN rapids.RapidsPluginUtils: RAPIDS Accelerator 24.12.0 using cudf 24.12.0.\n", "2022-11-30 07:51:33,292 WARN rapids.RapidsPluginUtils: spark.rapids.sql.multiThreadedRead.numThreads is set to 20.\n", "2022-11-30 07:51:33,295 WARN rapids.RapidsPluginUtils: RAPIDS Accelerator is enabled, to disable GPU support set `spark.rapids.sql.enabled` to false.\n", "2022-11-30 07:51:33,295 WARN rapids.RapidsPluginUtils: spark.rapids.sql.explain is set to `NOT_ON_GPU`. 
Set it to 'NONE' to suppress the diagnostics logging about the query placement on the GPU.\n", @@ -266,7 +266,7 @@ "name": "stdout", "output_type": "stream", "text": [ - "Training takes 24.10 seconds\n" + "Training takes 24.10 seconds\n" ] }, { diff --git a/examples/XGBoost-Examples/taxi/notebooks/scala/taxi-ETL.ipynb b/examples/XGBoost-Examples/taxi/notebooks/scala/taxi-ETL.ipynb index 02f38b99..20d6c32d 100644 --- a/examples/XGBoost-Examples/taxi/notebooks/scala/taxi-ETL.ipynb +++ b/examples/XGBoost-Examples/taxi/notebooks/scala/taxi-ETL.ipynb @@ -19,14 +19,14 @@ "All data could be found at https://www1.nyc.gov/site/tlc/about/tlc-trip-record-data.page\n", "\n", "### 2. Download needed jar\n", - "* [rapids-4-spark_2.12-24.10.0.jar](https://repo1.maven.org/maven2/com/nvidia/rapids-4-spark_2.12/24.10.0/rapids-4-spark_2.12-24.10.0.jar)\n", + "* [rapids-4-spark_2.12-24.12.0.jar](https://repo1.maven.org/maven2/com/nvidia/rapids-4-spark_2.12/24.12.0/rapids-4-spark_2.12-24.12.0.jar)\n", "\n", "### 3. Start Spark Standalone\n", "Before running the script, please setup Spark standalone mode\n", "\n", "### 4. Add ENV\n", "```\n", - "$ export SPARK_JARS=rapids-4-spark_2.12-24.10.0.jar\n", + "$ export SPARK_JARS=rapids-4-spark_2.12-24.12.0.jar\n", "\n", "```\n", "\n", diff --git a/tools/databricks/README.md b/tools/databricks/README.md index 8004ec41..95b222e9 100644 --- a/tools/databricks/README.md +++ b/tools/databricks/README.md @@ -19,4 +19,4 @@ top of the notebook. After that, select *Run all* to execute the tools for the 1. Multiple event logs must be comma-separated. 
- For example: `/dbfs/path/to/eventlog1,/dbfs/path/to/eventlog2` -**Latest Tools Version Supported** 24.10.0 \ No newline at end of file +**Latest Tools Version Supported** 24.12.0 \ No newline at end of file diff --git a/tools/databricks/[RAPIDS Accelerator for Apache Spark] Profiling Tool Notebook Template.ipynb b/tools/databricks/[RAPIDS Accelerator for Apache Spark] Profiling Tool Notebook Template.ipynb index f59953a1..0c058638 100644 --- a/tools/databricks/[RAPIDS Accelerator for Apache Spark] Profiling Tool Notebook Template.ipynb +++ b/tools/databricks/[RAPIDS Accelerator for Apache Spark] Profiling Tool Notebook Template.ipynb @@ -53,7 +53,7 @@ }, "outputs": [], "source": [ - "TOOLS_VER = \"24.10.0\"\n", + "TOOLS_VER = \"24.12.0\"\n", "print(f\"Using Tools Version: {TOOLS_VER}\")" ] }, diff --git a/tools/databricks/[RAPIDS Accelerator for Apache Spark] Qualification Tool Notebook Template.ipynb b/tools/databricks/[RAPIDS Accelerator for Apache Spark] Qualification Tool Notebook Template.ipynb index b27f2f91..3d1894ca 100644 --- a/tools/databricks/[RAPIDS Accelerator for Apache Spark] Qualification Tool Notebook Template.ipynb +++ b/tools/databricks/[RAPIDS Accelerator for Apache Spark] Qualification Tool Notebook Template.ipynb @@ -49,7 +49,7 @@ }, "outputs": [], "source": [ - "TOOLS_VER = \"24.10.0\"\n", + "TOOLS_VER = \"24.12.0\"\n", "print(f\"Using Tools Version: {TOOLS_VER}\")" ] },