diff --git a/nemo/utils/cloud.py b/nemo/utils/cloud.py
index 8b74963f12d2..7245567d636c 100644
--- a/nemo/utils/cloud.py
+++ b/nemo/utils/cloud.py
@@ -17,6 +17,8 @@ from time import sleep
 import wget
+from pytorch_lightning.plugins.environments import LightningEnvironment
+from pytorch_lightning.strategies import DDPStrategy, StrategyRegistry
 from nemo.utils import logging
@@ -80,3 +82,60 @@ def maybe_download_from_cloud(url, filename, subfolder=None, cache_dir=None, ref
             sleep(0.05)
             continue
     raise ValueError("Not able to download url right now, please try again.")
+
+
+class SageMakerDDPStrategy(DDPStrategy):
+    @property
+    def cluster_environment(self):
+        env = LightningEnvironment()
+        env.world_size = lambda: int(os.environ["WORLD_SIZE"])
+        env.global_rank = lambda: int(os.environ["RANK"])
+        return env
+
+    @cluster_environment.setter
+    def cluster_environment(self, env):
+        # prevents Lightning from overriding the Environment required for SageMaker
+        pass
+
+
+def initialize_sagemaker() -> None:
+    """
+    Helper function to initialize SageMaker with NeMo.
+    This function installs the system libraries that the NeMo ASR toolkit requires and initializes SageMaker DDP.
+    """
+
+    StrategyRegistry.register(
+        name='smddp', strategy=SageMakerDDPStrategy, process_group_backend="smddp", find_unused_parameters=False,
+    )
+
+    def _install_system_libraries() -> None:
+        os.system('chmod 777 /tmp && apt-get update && apt-get install -y libsndfile1 ffmpeg')
+
+    def _patch_torch_metrics() -> None:
+        """
+        Patches torchmetrics to not rely on internal state.
+        This is needed because SageMaker DDP overrides the `__init__` function of the modules to do automatic partitioning.
+        """
+        from torchmetrics import Metric
+
+        def __new_hash__(self):
+            hash_vals = [self.__class__.__name__, id(self)]
+            return hash(tuple(hash_vals))
+
+        Metric.__hash__ = __new_hash__
+
+    _patch_torch_metrics()
+
+    if os.environ.get("RANK") and os.environ.get("WORLD_SIZE"):
+        import smdistributed.dataparallel.torch.distributed as dist
+
+        # has to be imported, as it overrides torch modules and such when DDP is enabled.
+        import smdistributed.dataparallel.torch.torch_smddp
+
+        dist.init_process_group()
+
+        if dist.get_local_rank():
+            _install_system_libraries()
+            return dist.barrier()  # wait for main process
+    _install_system_libraries()
+    return
diff --git a/nemo/utils/notebook_utils.py b/nemo/utils/notebook_utils.py
new file mode 100644
index 000000000000..ce53f9f3dba5
--- /dev/null
+++ b/nemo/utils/notebook_utils.py
@@ -0,0 +1,104 @@
+# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import glob
+import json
+import os
+import os.path
+import subprocess
+import tarfile
+from typing import Optional
+
+import wget
+
+
+# Function to build a manifest
+def build_manifest(transcripts_path, manifest_path, data_dir, mount_dir, wav_path):
+    # Create the manifest with audio paths that reference mount_dir. This is useful when mounting the dataset.
+    mount_dir = mount_dir if mount_dir else data_dir
+    with open(transcripts_path, 'r') as fin:
+        with open(manifest_path, 'w') as fout:
+            for line in fin:
+                # Lines look like this:
+                # <s> transcript </s> (fileID)
+                transcript = line[: line.find('(') - 1].lower()
+                transcript = transcript.replace('<s>', '').replace('</s>', '')
+                transcript = transcript.strip()
+
+                file_id = line[line.find('(') + 1 : -2]  # e.g. "cen4-fash-b"
+                audio_path = os.path.join(
+                    data_dir, wav_path, file_id[file_id.find('-') + 1 : file_id.rfind('-')], file_id + '.wav'
+                )
+
+                mounted_audio_path = os.path.join(
+                    mount_dir, wav_path, file_id[file_id.find('-') + 1 : file_id.rfind('-')], file_id + '.wav'
+                )
+                # Import sox here so that sox is not required just to import nemo.utils.
+                import sox
+
+                duration = sox.file_info.duration(audio_path)
+
+                # Write the metadata to the manifest
+                metadata = {"audio_filepath": mounted_audio_path, "duration": duration, "text": transcript}
+                json.dump(metadata, fout)
+                fout.write('\n')
+
+
+def download_an4(data_dir: str = "./", train_mount_dir: Optional[str] = None, test_mount_dir: Optional[str] = None):
+    """
+    Function to download the AN4 dataset. This hides pre-processing boilerplate for notebook ASR examples.
+
+    Args:
+        data_dir: Path to store the data.
+        train_mount_dir: If you plan to mount the dataset, use this to prepend the mount directory to the
+            audio filepath in the train manifest.
+        test_mount_dir: If you plan to mount the dataset, use this to prepend the mount directory to the
+            audio filepath in the test manifest.
+    """
+    print("******")
+    os.makedirs(data_dir, exist_ok=True)
+    if not os.path.exists(data_dir + '/an4_sphere.tar.gz'):
+        an4_url = 'https://dldata-public.s3.us-east-2.amazonaws.com/an4_sphere.tar.gz'
+        an4_path = wget.download(an4_url, data_dir)
+        print(f"Dataset downloaded at: {an4_path}")
+    else:
+        print("Tarfile already exists.")
+        an4_path = data_dir + '/an4_sphere.tar.gz'
+
+    if not os.path.exists(data_dir + '/an4/'):
+        tar = tarfile.open(an4_path)
+        tar.extractall(path=data_dir)
+
+        print("Converting .sph to .wav...")
+        sph_list = glob.glob(data_dir + '/an4/**/*.sph', recursive=True)
+        for sph_path in sph_list:
+            wav_path = sph_path[:-4] + '.wav'
+            cmd = ["sox", sph_path, wav_path]
+            subprocess.run(cmd)
+        print("Finished conversion.\n******")
+
+    # Building Manifests
+    print("******")
+    train_transcripts = data_dir + '/an4/etc/an4_train.transcription'
+    train_manifest = data_dir + '/an4/train_manifest.json'
+
+    if not os.path.isfile(train_manifest):
+        build_manifest(train_transcripts, train_manifest, data_dir, train_mount_dir, 'an4/wav/an4_clstk')
+        print("Training manifest created.")
+
+    test_transcripts = data_dir + '/an4/etc/an4_test.transcription'
+    test_manifest = data_dir + '/an4/test_manifest.json'
+    if not os.path.isfile(test_manifest):
+        build_manifest(test_transcripts, test_manifest, data_dir, test_mount_dir, 'an4/wav/an4test_clstk')
+        print("Test manifest created.")
+    print("***Done***")
diff --git a/tutorials/cloud/README.md b/tutorials/cloud/README.md
new file mode 100644
index 000000000000..c88c9e138bb1
--- /dev/null
+++ b/tutorials/cloud/README.md
@@ -0,0 +1,7 @@
+# NeMo Cloud Tutorials
+
+These tutorials are the best way to get started with NeMo in the cloud.
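Both tutorials patch their SageMaker entrypoint script to call the helper added in `nemo/utils/cloud.py`, so the first line of the training script ends up as the snippet below (see the "Initialize SageMaker within Training Script" step in each notebook). It installs the required system libraries on the instance and registers the `smddp` DDP strategy used for multi-GPU and multi-node runs.

```python
from nemo.utils.cloud import initialize_sagemaker; initialize_sagemaker()
```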
+ +## ASR +* [Quickstart: Training an ASR model on AWS SageMaker](aws/SageMaker_ASR_Training.ipynb) +* [Multi-Node Scaling: ASR Fine-Tuning on AWS SageMaker](aws/ASR_Finetuning_at_Scale_with_AWS_SageMaker.ipynb) diff --git a/tutorials/cloud/aws/ASR_Finetuning_at_Scale_with_AWS_SageMaker.ipynb b/tutorials/cloud/aws/ASR_Finetuning_at_Scale_with_AWS_SageMaker.ipynb new file mode 100644 index 000000000000..55423f4092ec --- /dev/null +++ b/tutorials/cloud/aws/ASR_Finetuning_at_Scale_with_AWS_SageMaker.ipynb @@ -0,0 +1,764 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "2e86a2b3", + "metadata": {}, + "source": [ + "# ASR Fine-Tuning at Scale using AWS SageMaker" + ] + }, + { + "cell_type": "markdown", + "id": "215e3d3c", + "metadata": {}, + "source": [ + "In this tutorial we show how you can train a NeMo ASR Model using [Amazon SageMaker](https://docs.aws.amazon.com/sagemaker/latest/dg/whatis.html) using the CommonVoice Esperanto dataset (~250hrs), and scale onto multiple GPUs and nodes with near-linear scaling.\n", + "\n", + "AWS SageMaker is useful for practioners/researchers who are familiar with training locally or on a remote instance (via SSH). SageMaker also supports multi-GPU & Multi-node." + ] + }, + { + "cell_type": "markdown", + "id": "5e6d484f", + "metadata": {}, + "source": [ + "\"sagemaker_benchmark\"" + ] + }, + { + "cell_type": "markdown", + "id": "81d7883d", + "metadata": {}, + "source": [ + "We fine-tune the [Conformer SSL Large (En)](https://catalog.ngc.nvidia.com/orgs/nvidia/teams/nemo/models/ssl_en_conformer_large) model using the CommonVoice esperanto subset on a remote instance with a 8 GPUs (ml.p3.16xlarge). We use S3 to store the data and our checkpoints/logs.\n", + "\n", + "The overall steps are:\n", + "\n", + "1. Setup your AWS Credentials to access SageMaker\n", + "2. Download the source code we'll be running\n", + "3. Setup the CommonVoice Esperanto dataset\n", + "4. Upload data to S3\n", + "5. Configure the training job\n", + "6. Run training job on SageMaker\n", + "7. Download model, (Optional) Tensorboard Logs" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "ac621da0", + "metadata": {}, + "outputs": [], + "source": [ + "\"\"\"\n", + "You can run either this notebook locally (if you have all the dependencies) or on Google Colab.\n", + "\n", + "Instructions for setting up Colab are as follows:\n", + "1. Open a new Python 3 notebook.\n", + "2. Import this notebook from GitHub (File -> Upload Notebook -> \"GITHUB\" tab -> copy/paste GitHub URL)\n", + "3. Connect to an instance with a GPU (Runtime -> Change runtime type -> select \"GPU\" for hardware accelerator)\n", + "4. Run this cell to set up dependencies.\n", + "5. Restart the runtime (Runtime -> Restart Runtime) for any upgraded packages to take effect\n", + "\"\"\"\n", + "# If you're using Google Colab and not running locally, run this cell.\n", + "\n", + "## Install dependencies\n", + "!pip install wget\n", + "!apt-get install sox libsndfile1 ffmpeg\n", + "!pip install text-unidecode\n", + "!pip install matplotlib>=3.3.2\n", + "\n", + "## Install NeMo\n", + "BRANCH = 'main'\n", + "!python -m pip install git+https://github.com/NVIDIA/NeMo.git@$BRANCH#egg=nemo_toolkit[all]\n", + "\n", + "\"\"\"\n", + "Remember to restart the runtime for the kernel to pick up any upgraded packages (e.g. 
matplotlib)!\n", + "Alternatively, you can uncomment the exit() below to crash and restart the kernel, in the case\n", + "that you want to use the \"Run All Cells\" (or similar) option.\n", + "\"\"\"\n", + "# exit()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "61c4fbe2", + "metadata": {}, + "outputs": [], + "source": [ + "pip install sagemaker awscli" + ] + }, + { + "cell_type": "markdown", + "id": "876f553d", + "metadata": {}, + "source": [ + "### 1. Setup SageMaker with AWS Credentials\n", + "\n", + "If you haven't setup your AWS credentials, you can setup using the AWS CLI.\n", + "You will need your access and Secret key, with permissions to use SageMaker and S3." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "1328482d", + "metadata": {}, + "outputs": [], + "source": [ + "!aws configure" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "01477d55", + "metadata": {}, + "outputs": [], + "source": [ + "%load_ext tensorboard\n", + "from pathlib import Path\n", + "import os\n", + "import sagemaker\n", + "import wget\n", + "import json\n", + "import re\n", + "\n", + "from tqdm import tqdm\n", + "from omegaconf import OmegaConf\n", + "from sagemaker import get_execution_role\n", + "from sagemaker.pytorch import PyTorch" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "405806f9", + "metadata": {}, + "outputs": [], + "source": [ + "sess = sagemaker.Session()" + ] + }, + { + "cell_type": "markdown", + "id": "7d099a96", + "metadata": {}, + "source": [ + "### 2. Download the NeMo source code\n", + "\n", + "SageMaker allows you to pass in your own source code, with an entrypoint script.\n", + "\n", + "Below we download the AWS NeMo `config.yaml` which contains our configuration, and the `speech_to_text_ctc_bpe.py` script to run training.\n", + "\n", + "Our folder structure will look like this:\n", + "\n", + " code/\n", + " speech_to_text_ctc_bpe.py\n", + " conf/\n", + " config.yaml\n", + " data/\n", + " manifests/\n", + " raw_data/" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "0b456c57", + "metadata": {}, + "outputs": [], + "source": [ + "root_dir = Path('./')\n", + "code_dir = root_dir / 'code/'\n", + "config_dir = code_dir / 'conf/'\n", + "data_dir = root_dir / 'data/'\n", + "\n", + "root_dir.mkdir(exist_ok=True)\n", + "code_dir.mkdir(exist_ok=True)\n", + "config_dir.mkdir(exist_ok=True)\n", + "data_dir.mkdir(exist_ok=True)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "721c7b7d", + "metadata": {}, + "outputs": [], + "source": [ + "config_path = str(config_dir / \"config.yaml\")\n", + "\n", + "# download scripts to format the data source.\n", + "wget.download(\"https://raw.githubusercontent.com/NVIDIA/NeMo/main/scripts/speech_recognition/convert_hf_dataset_to_nemo.py\", str(code_dir))\n", + "wget.download(\"https://raw.githubusercontent.com/NVIDIA/NeMo/main/scripts/speech_recognition/convert_to_tarred_audio_dataset.py\",\n", + " str(code_dir))\n", + "\n", + "# download scripts to run training\n", + "wget.download(\"https://raw.githubusercontent.com/NVIDIA/NeMo/main/examples/asr/conf/conformer/conformer_ctc_bpe.yaml\", config_path)\n", + "wget.download(\"https://raw.githubusercontent.com/NVIDIA/NeMo/main/examples/asr/asr_ctc/speech_to_text_ctc_bpe.py\",\n", + " str(code_dir))\n", + "\n", + "# download script to create tokenizer\n", + 
"wget.download(\"https://raw.githubusercontent.com/NVIDIA/NeMo/main/scripts/tokenizers/process_asr_text_tokenizer.py\",\n", + " str(code_dir))" + ] + }, + { + "cell_type": "markdown", + "id": "7934baab", + "metadata": {}, + "source": [ + "We also create a `requirements.txt` file within our source code to install NeMo." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "397d8eef", + "metadata": {}, + "outputs": [], + "source": [ + "with open(code_dir / 'requirements.txt', 'w') as f:\n", + " f.write(\"nemo_toolkit[all]\")" + ] + }, + { + "cell_type": "markdown", + "id": "a7bc7f51", + "metadata": {}, + "source": [ + "### 2.1 Initialize SageMaker within Training Script\n", + "\n", + "We provide a helper function that we require to be imported and run at the top of the training script.\n", + "\n", + "This installs and setups DDP for you. It also alleviates having to import a custom container, and can leverage all of the SageMaker containers. Rather than running this cell, you could also manually do this in your script." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "e17535a7", + "metadata": {}, + "outputs": [], + "source": [ + "line = \"from nemo.utils.cloud import initialize_sagemaker; initialize_sagemaker()\"\n", + "with open(code_dir / \"speech_to_text_ctc.py\", 'r+') as f:\n", + " content = f.read()\n", + " f.seek(0, 0)\n", + " f.write(line.rstrip('\\r\\n') + '\\n' + content)" + ] + }, + { + "cell_type": "markdown", + "id": "ce3e5ae8", + "metadata": {}, + "source": [ + "### 3. Setup the CommonVoice Esperanto Dataset\n", + "\n", + "Mozilla Common Voice requires you to use huggingface datasets (`pip install datasets`) and register as a user to generate an API key.\n", + "\n", + "#### Authenticated Setup Steps\n", + "\n", + "Website steps:\n", + "- Visit https://huggingface.co/settings/profile\n", + "- Visit \"Access Tokens\" on list of items.\n", + "- Create new token - provide a name for the token and \"read\" access is sufficient.\n", + " - PRESERVE THAT TOKEN API KEY. You can copy that key for next step.\n", + "- Visit the HuggingFace Dataset page for Mozilla Common Voice\n", + " - There should be a section that asks you for your approval.\n", + " - Make sure you are logged in and then read that agreement.\n", + " - If and only if you agree to the text, then accept the terms." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "5cbca268", + "metadata": {}, + "outputs": [], + "source": [ + "# Paste your preserved HF TOKEN API KEY (from above) when asked.\n", + "!huggingface-cli login" + ] + }, + { + "cell_type": "markdown", + "id": "9b964276", + "metadata": {}, + "source": [ + "Now you should be logged in. When running the script, dont forget to set `use_auth_token=True`!" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "e89e4ee8", + "metadata": {}, + "outputs": [], + "source": [ + "!python ./code/convert_hf_dataset_to_nemo.py \\\n", + " output_dir=./data/ \\\n", + " path=\"mozilla-foundation/common_voice_11_0\" \\\n", + " name=\"eo\" \\\n", + " ensure_ascii=False \\\n", + " use_auth_token=True" + ] + }, + { + "cell_type": "markdown", + "id": "2e34a392", + "metadata": {}, + "source": [ + "We apply filtering/data-processing to the dataset. We've skipped explanations as to why we're filtering, as extensive information can be found [here](https://github.com/andrusenkoau/NeMo/blob/esperanto_example/docs/source/asr/examples/esperanto_asr/esperanto_asr.rst)." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "3ca04023", + "metadata": {}, + "outputs": [], + "source": [ + "dev_manifest = f\"{data_dir}/validation/validation_mozilla-foundation_common_voice_11_0_manifest.json\"\n", + "test_manifest = f\"{data_dir}/test/test_mozilla-foundation_common_voice_11_0_manifest.json\"\n", + "train_manifest = f\"{data_dir}/train/train_mozilla-foundation_common_voice_11_0_manifest.json\"\n", + "\n", + "def compute_char_counts(manifest):\n", + " char_counts = {}\n", + " with open(manifest, 'r') as fn_in:\n", + " for line in tqdm(fn_in, desc=\"Compute counts..\"):\n", + " line = line.replace(\"\\n\", \"\")\n", + " data = json.loads(line)\n", + " text = data[\"text\"]\n", + " for word in text.split():\n", + " for char in word:\n", + " if char not in char_counts:\n", + " char_counts[char] = 1\n", + " else:\n", + " char_counts[char] += 1\n", + " return char_counts\n", + "\n", + "char_counts = compute_char_counts(train_manifest)\n", + "\n", + "threshold = 10\n", + "trash_char_list = []\n", + "\n", + "for char in char_counts:\n", + " if char_counts[char] <= threshold:\n", + " trash_char_list.append(char)\n", + "\n", + "def clear_data_set(manifest, char_rate_threshold=None):\n", + "\n", + " chars_to_ignore_regex = \"[\\.\\,\\?\\:\\-!;()«»…\\]\\[/\\*–‽+&_\\\\½√>€™$•¼}{~—=“\\\"”″‟„]\"\n", + " addition_ignore_regex = f\"[{''.join(trash_char_list)}]\"\n", + "\n", + " manifest_clean = manifest + '.clean'\n", + " war_count = 0\n", + " with open(manifest, 'r') as fn_in, \\\n", + " open(manifest_clean, 'w', encoding='utf-8') as fn_out:\n", + " for line in tqdm(fn_in, desc=\"Cleaning manifest data\"):\n", + " line = line.replace(\"\\n\", \"\")\n", + " data = json.loads(line)\n", + " text = data[\"text\"]\n", + " if char_rate_threshold and len(text.replace(' ', '')) / float(data['duration']) > char_rate_threshold:\n", + " print(f\"[WARNING]: {data['audio_filepath']} has char rate > 15 per sec: {len(text)} chars, {data['duration']} duration\")\n", + " war_count += 1\n", + " continue\n", + " text = re.sub(chars_to_ignore_regex, \"\", text)\n", + " text = re.sub(addition_ignore_regex, \"\", text)\n", + " data[\"text\"] = text\n", + " data = json.dumps(data, ensure_ascii=False)\n", + " fn_out.write(f\"{data}\\n\")\n", + " print(f\"[INFO]: {war_count} files were removed from manifest\")\n", + "\n", + "clear_data_set(dev_manifest)\n", + "clear_data_set(test_manifest)\n", + "clear_data_set(train_manifest, char_rate_threshold=15)" + ] + }, + { + "cell_type": "markdown", + "id": "4b50de19", + "metadata": {}, + "source": [ + "When the dataset is mounted to the SageMaker instance, the filepaths change. To handle this, we change the root path of the manifest entries to point to the correct mounted directory." 
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "e31d9e91",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "dev_manifest = dev_manifest + '.clean'\n",
+    "\n",
+    "\n",
+    "def replace_real_dir_with_mount_dir(manifest_path, data_root, mount_dir):\n",
+    "    with open(manifest_path) as f:\n",
+    "        entries = [json.loads(x) for x in f]\n",
+    "    for entry in entries:\n",
+    "        entry['audio_filepath'] = entry['audio_filepath'].replace(str(data_root), str(mount_dir))\n",
+    "    with open(manifest_path + '.mount', 'w') as f:\n",
+    "        f.write('\\n'.join([json.dumps(x) for x in entries]))\n",
+    "\n",
+    "replace_real_dir_with_mount_dir(dev_manifest, data_dir.absolute(), \"/opt/ml/input/data/testing/\")"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "35d71747",
+   "metadata": {},
+   "source": [
+    "To improve throughput when reading from S3 directly within the remote instance, we also tar the training dataset."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "29d858a2",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "train_manifest = train_manifest + '.clean'"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "bc495ca7",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "!python ./code/convert_to_tarred_audio_dataset.py \\\n",
+    "    --manifest_path={train_manifest} \\\n",
+    "    --target_dir=./data/train_tarred_1bk \\\n",
+    "    --num_shards=512 \\\n",
+    "    --max_duration=15.0 \\\n",
+    "    --min_duration=1.0 \\\n",
+    "    --shuffle \\\n",
+    "    --shuffle_seed=1 \\\n",
+    "    --sort_in_shards \\\n",
+    "    --workers=-1"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "c850634e",
+   "metadata": {},
+   "source": [
+    "Finally, we generate the tokenizer that we will use for training, built from the training manifest. Further information about parameters when choosing vocab sizes can be found [here](https://github.com/andrusenkoau/NeMo/blob/esperanto_example/docs/source/asr/examples/esperanto_asr/esperanto_asr.rst)."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "ca394bcd",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "vocab_size=128\n",
+    "\n",
+    "!python ./code/process_asr_text_tokenizer.py \\\n",
+    "    --manifest={train_manifest} \\\n",
+    "    --vocab_size={vocab_size} \\\n",
+    "    --data_root=./data/tokenizers \\\n",
+    "    --tokenizer=\"spe\" \\\n",
+    "    --spe_type=bpe \\\n",
+    "    --no_lower_case"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "bfa2199e",
+   "metadata": {},
+   "source": [
+    "### 4. Upload the Dataset to S3\n",
+    "\n",
+    "This can take some time depending on your upload speed. We are uploading roughly 111GB of data to S3."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "e1c5a60a",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Optional: move the raw training dataset out of the data folder to reduce data transfer\n",
+    "import shutil\n",
+    "\n",
+    "shutil.move(f\"{data_dir}/train/\", root_dir / \"train_raw/\")\n",
+    "\n",
+    "prefix = \"cv_esperanto\"\n",
+    "bucket = sess.default_bucket()\n",
+    "\n",
+    "loc = sess.upload_data(path=str(data_dir), bucket=bucket, key_prefix=prefix)\n",
+    "loc"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "6321e3a9",
+   "metadata": {},
+   "source": [
+    "### 5. Configure the training job\n",
+    "\n",
+    "Now we configure the training job by modifying the `config.yaml` file that is stored in our source code directory.\n",
+    "We pass relative directory paths for the data based on the SageMaker mount directory on the remote instance."
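For context on the `/opt/ml/...` paths used below: SageMaker mounts each input channel passed to `est.fit(inputs=...)` (later in this notebook) at `/opt/ml/input/data/<channel_name>/` inside the training container, so data uploaded under the S3 prefix `loc` appears there at runtime. A minimal illustrative sketch of that mapping; the helper function is ours and not part of the training code:

```python
# SageMaker mounts every input channel under /opt/ml/input/data/<channel_name>/.
# With channels = {"training": loc, "testing": loc}, the uploaded prefix maps onto those roots.
def to_mount_path(relative_path: str, channel: str = "training") -> str:
    """Illustrative helper: map a path inside the uploaded data prefix to its in-container location."""
    return f"/opt/ml/input/data/{channel}/{relative_path}"

print(to_mount_path("train_tarred_1bk/tarred_audio_manifest.json"))
# -> /opt/ml/input/data/training/train_tarred_1bk/tarred_audio_manifest.json
print(to_mount_path("validation/validation_mozilla-foundation_common_voice_11_0_manifest.json.clean.mount", "testing"))
# -> matches the validation_ds path set in the config cell below
```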
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "ac709398", + "metadata": {}, + "outputs": [], + "source": [ + "# the output path for the model checkpoints/logs\n", + "output_path = \"s3://\" + sess.default_bucket() + \"/nemo-output/\"" + ] + }, + { + "cell_type": "markdown", + "id": "8186678f", + "metadata": {}, + "source": [ + "We setup the training configuration based on the Esperanto experiments ran [here](https://github.com/andrusenkoau/NeMo/blob/esperanto_example/docs/source/asr/examples/esperanto_asr/esperanto_asr.rst).\n", + "\n", + "If you change the number of GPUs on the instance, remember to change the values below." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "a4b67faa", + "metadata": {}, + "outputs": [], + "source": [ + "global_batch_size = 1024\n", + "train_batch_size = 8\n", + "n_gpus = 8\n", + "num_nodes = 1 # set this to the number of nodes you'd like to train on\n", + "accumulate_grad_batches = int(global_batch_size / (n_gpus * num_nodes * train_batch_size))" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "4bb61640", + "metadata": {}, + "outputs": [], + "source": [ + "conf = OmegaConf.load(config_path)\n", + "\n", + "# data config setup\n", + "conf.model.train_ds.batch_size=train_batch_size\n", + "conf.model.train_ds.is_tarred = True\n", + "conf.model.train_ds.tarred_audio_filepaths = \"/opt/ml/input/data/training/train_tarred_1bk/audio__OP_0..511_CL_.tar\"\n", + "conf.model.train_ds.manifest_filepath = \"/opt/ml/input/data/training/train_tarred_1bk/tarred_audio_manifest.json\"\n", + "conf.model.validation_ds.manifest_filepath = \"/opt/ml/input/data/testing/validation/validation_mozilla-foundation_common_voice_11_0_manifest.json.clean.mount\"\n", + "\n", + "# optimization setup\n", + "conf.model.optim.lr = 1e-3\n", + "conf.model.optim.sched.name = \"CosineAnnealing\"\n", + "\n", + "# remove variable which is not needed when using CosineAnnealing\n", + "conf.model.optim.sched.pop('d_model')\n", + "\n", + "# logging/output setup\n", + "conf.exp_manager.exp_dir = \"/opt/ml/model/\"\n", + "\n", + "# the BPE tokenizer location based on the remote instance mounted directory\n", + "conf.model.tokenizer.dir = \"/opt/ml/input/data/training/tokenizers/tokenizer_spe_bpe_v128\"\n", + "\n", + "# the pre-trained model we want to fine-tune\n", + "conf.init_from_nemo_model = \"ssl_en_conformer_large.nemo\"\n", + "\n", + "# training setup\n", + "conf.trainer.accelerator = \"gpu\"\n", + "# enable SageMaker DDP\n", + "conf.trainer.strategy = \"smddp\"\n", + "conf.trainer.max_epochs = 300\n", + "conf.trainer.accumulate_grad_batches = accumulate_grad_batches\n", + "conf.trainer.precision = 16 # if using Ampere or above (i.e ml.p4d.24xlarge), you can use bf16 (BFloat16) \n", + "\n", + "# resume flags if crashes occur\n", + "conf.exp_manager.resume_if_exists=True\n", + "conf.exp_manager.resume_ignore_no_checkpoint=True" + ] + }, + { + "cell_type": "markdown", + "id": "1e287a26", + "metadata": {}, + "source": [ + "Uncomment the below cell you want to use W&B, you'll need to pass your W&B token at a later step." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "3ba11ab8", + "metadata": {}, + "outputs": [], + "source": [ + "# conf.exp_manager.wandb_logger_kwargs.name = \"common_voice_esperanto\"\n", + "# conf.exp_manager.create_wandb_logger = True\n", + "# conf.exp_manager.wandb_logger_kwargs.resume=True" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "e4b27920", + "metadata": {}, + "outputs": [], + "source": [ + "# save the updated config\n", + "OmegaConf.save(conf, config_dir / 'config.yaml')" + ] + }, + { + "cell_type": "markdown", + "id": "1d8cd4cb", + "metadata": {}, + "source": [ + "Download the pre-trained SSL model from NGC. We'll upload this model with our code to SageMaker." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "8d11ff03", + "metadata": {}, + "outputs": [], + "source": [ + "!wget --content-disposition https://api.ngc.nvidia.com/v2/models/nvidia/nemo/ssl_en_conformer_large/versions/1.10.1/zip -O {code_dir}/ssl_en_conformer_large_1.10.1.zip\n", + "!unzip {code_dir}/ssl_en_conformer_large_1.10.1.zip -d {code_dir}" + ] + }, + { + "cell_type": "markdown", + "id": "959da702", + "metadata": {}, + "source": [ + "### 5. Run training on SageMaker\n", + "\n", + "Pass the path of the training and validation data on S3 + the output directory on S3 to the PyTorch estimator, and call fit with the appropriate bucket locations for the training and testing data." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "1d2e44e3", + "metadata": {}, + "outputs": [], + "source": [ + "channels = {\"training\": loc, \"testing\": loc}\n", + "\n", + "role = get_execution_role()\n", + "\n", + "output_path = \"s3://\" + sess.default_bucket() + \"/nemo-output/\"\n", + "\n", + "local_mode = False\n", + "\n", + "if local_mode:\n", + " instance_type = \"local_gpu\"\n", + "else:\n", + " instance_type = \"ml.p3.16xlarge\"\n", + " \n", + "instance_count = num_nodes\n", + "\n", + "est = PyTorch(\n", + " entry_point=\"speech_to_text_ctc_bpe.py\",\n", + " source_dir=\"code\", # directory of your training script\n", + " role=role,\n", + " instance_type=instance_type,\n", + " instance_count=instance_count,\n", + " framework_version=\"1.12.0\",\n", + " py_version=\"py38\",\n", + " volume_size=125,\n", + " output_path=output_path,\n", + " hyperparameters={'config-path': 'conf', 'config-name': 'config'},\n", + " # uncomment if you're enabling WANDB, and pass your own API key.\n", + " # environment={\"WANDB_API_KEY\": \"\"},\n", + " # enable SageMaker Optimized DDP for multi-node scaling\n", + " distribution={\"smdistributed\":{\"dataparallel\":{\"enabled\":True, \"fp16\": True}}},\n", + " input_mode='FastFile' # enables reading from S3 directly, no need to download the data\n", + ")\n", + "\n", + "est.fit(inputs=channels)" + ] + }, + { + "cell_type": "markdown", + "id": "2be67b8e", + "metadata": {}, + "source": [ + "### 6. Download model, (Optional) Tensorboard Logs\n", + "\n", + "SageMaker stores our models/logs within a tar file after training has finished. These can be obtained from S3 like below.\n", + "\n", + "We also visualize the training logs. We suggest using an external logger (such as W&B) to track training progress during the run." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "81cd58d3", + "metadata": {}, + "outputs": [], + "source": [ + "key = est.model_data.replace(\"s3://\" + sess.default_bucket() + '/', '')\n", + "\n", + "sess.boto_session.client(\"s3\", region_name=sess.boto_region_name).download_file(\n", + " Bucket=sess.default_bucket(), Key=key, Filename='model.tar.gz',\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "656d53f3", + "metadata": {}, + "outputs": [], + "source": [ + "!tar -xvzf model.tar.gz" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "62018b6e", + "metadata": {}, + "outputs": [], + "source": [ + "%tensorboard --logdir ./ --host 0.0.0.0" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.9.12" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/tutorials/cloud/aws/SageMaker_ASR_Training.ipynb b/tutorials/cloud/aws/SageMaker_ASR_Training.ipynb new file mode 100644 index 000000000000..ad5261dc1e21 --- /dev/null +++ b/tutorials/cloud/aws/SageMaker_ASR_Training.ipynb @@ -0,0 +1,418 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "2e86a2b3", + "metadata": {}, + "source": [ + "# NeMo ASR Training Using AWS SageMaker" + ] + }, + { + "cell_type": "markdown", + "id": "215e3d3c", + "metadata": {}, + "source": [ + "In this tutorial we show how you can train a NeMo ASR Model using [Amazon SageMaker](https://docs.aws.amazon.com/sagemaker/latest/dg/whatis.html). This is meant to be a minimalistic example of how to use SageMaker with NeMo.\n", + "\n", + "AWS SageMaker is useful for practioners/researchers who are familiar with training locally or on a remote instance (via SSH). SageMaker also supports multi-GPU & Multi-node.\n", + "\n", + "Using AWS SageMaker we train a simple Conformer CTC model using the AN4 dataset on a remote instance with a GPU (p3.2xlarge). We use S3 to store the data and our checkpoints/logs.\n", + "\n", + "The overall steps are:\n", + "\n", + "1. Setup your AWS Credentials to access SageMaker\n", + "2. Download the source code we'll be running\n", + "3. Setup AN4 dataset, upload data to S3\n", + "4. Configure the training job\n", + "5. Run training job on SageMaker\n", + "6. Download model, (Optional) Tensorboard Logs" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "ac621da0", + "metadata": {}, + "outputs": [], + "source": [ + "\"\"\"\n", + "You can run either this notebook locally (if you have all the dependencies and a GPU) or on Google Colab.\n", + "\n", + "Instructions for setting up Colab are as follows:\n", + "1. Open a new Python 3 notebook.\n", + "2. Import this notebook from GitHub (File -> Upload Notebook -> \"GITHUB\" tab -> copy/paste GitHub URL)\n", + "3. Connect to an instance with a GPU (Runtime -> Change runtime type -> select \"GPU\" for hardware accelerator)\n", + "4. Run this cell to set up dependencies.\n", + "5. 
Restart the runtime (Runtime -> Restart Runtime) for any upgraded packages to take effect\n", + "\"\"\"\n", + "# If you're using Google Colab and not running locally, run this cell.\n", + "\n", + "## Install dependencies\n", + "!pip install wget\n", + "!apt-get install sox libsndfile1 ffmpeg\n", + "!pip install text-unidecode\n", + "!pip install matplotlib>=3.3.2\n", + "\n", + "## Install NeMo\n", + "BRANCH = 'main'\n", + "!python -m pip install git+https://github.com/NVIDIA/NeMo.git@$BRANCH#egg=nemo_toolkit[all]\n", + "\n", + "\"\"\"\n", + "Remember to restart the runtime for the kernel to pick up any upgraded packages (e.g. matplotlib)!\n", + "Alternatively, you can uncomment the exit() below to crash and restart the kernel, in the case\n", + "that you want to use the \"Run All Cells\" (or similar) option.\n", + "\"\"\"\n", + "# exit()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "61c4fbe2", + "metadata": {}, + "outputs": [], + "source": [ + "pip install sagemaker awscli" + ] + }, + { + "cell_type": "markdown", + "id": "876f553d", + "metadata": {}, + "source": [ + "### 1. Setup SageMaker with AWS Credentials\n", + "\n", + "If you haven't setup your AWS credentials, you can setup using the AWS CLI.\n", + "You will need your access and Secret key, with permissions to use SageMaker and S3." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "1328482d", + "metadata": {}, + "outputs": [], + "source": [ + "!aws configure" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "01477d55", + "metadata": {}, + "outputs": [], + "source": [ + "%load_ext tensorboard\n", + "from pathlib import Path\n", + "import os\n", + "import sagemaker\n", + "import wget\n", + "from omegaconf import OmegaConf\n", + "from sagemaker import get_execution_role\n", + "from sagemaker.pytorch import PyTorch\n", + "\n", + "from nemo.utils.notebook_utils import download_an4" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "405806f9", + "metadata": {}, + "outputs": [], + "source": [ + "sess = sagemaker.Session()" + ] + }, + { + "cell_type": "markdown", + "id": "7d099a96", + "metadata": {}, + "source": [ + "### 2. 
Download the NeMo source code\n",
+    "\n",
+    "SageMaker allows you to pass in your own source code, with an entrypoint script.\n",
+    "\n",
+    "Below we download the NeMo `config.yaml` which contains our configuration, and the `speech_to_text_ctc.py` script to run training.\n",
+    "\n",
+    "Our folder structure will look like this:\n",
+    "\n",
+    "    code/\n",
+    "      speech_to_text_ctc.py\n",
+    "      conf/\n",
+    "        config.yaml\n",
+    "    data/\n",
+    "      an4/"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "0b456c57",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "root_dir = Path('./an4_nemo_sagemaker/')\n",
+    "code_dir = root_dir / 'code/'\n",
+    "config_dir = code_dir / 'conf/'\n",
+    "data_dir = root_dir / 'data/'\n",
+    "\n",
+    "root_dir.mkdir(exist_ok=True)\n",
+    "code_dir.mkdir(exist_ok=True)\n",
+    "config_dir.mkdir(exist_ok=True)\n",
+    "data_dir.mkdir(exist_ok=True)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "721c7b7d",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "config_path = str(config_dir / \"config.yaml\")\n",
+    "wget.download(\"https://raw.githubusercontent.com/NVIDIA/NeMo/main/examples/asr/conf/conformer/conformer_ctc_char.yaml\", config_path)\n",
+    "wget.download(\"https://raw.githubusercontent.com/NVIDIA/NeMo/main/examples/asr/asr_ctc/speech_to_text_ctc.py\", str(code_dir))"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "7934baab",
+   "metadata": {},
+   "source": [
+    "We also create a `requirements.txt` file within our source code to install NeMo."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "397d8eef",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "with open(code_dir / 'requirements.txt', 'w') as f:\n",
+    "    f.write(\"nemo_toolkit[all]\")"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "a7bc7f51",
+   "metadata": {},
+   "source": [
+    "### 2.1 Initialize SageMaker within Training Script\n",
+    "\n",
+    "We provide a helper function that must be imported and run at the top of the training script.\n",
+    "\n",
+    "This installs dependencies and sets up DDP for you. It also avoids having to build a custom container, so you can leverage all of the standard SageMaker containers. Rather than running this cell, you could also do this manually in your script."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "e17535a7",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "line = \"from nemo.utils.cloud import initialize_sagemaker; initialize_sagemaker()\"\n",
+    "with open(code_dir / \"speech_to_text_ctc.py\", 'r+') as f:\n",
+    "    content = f.read()\n",
+    "    f.seek(0, 0)\n",
+    "    f.write(line.rstrip('\\r\\n') + '\\n' + content)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "bfa2199e",
+   "metadata": {},
+   "source": [
+    "### 3. Setup the AN4 Dataset, upload data to S3\n",
+    "\n",
+    "We now download our training and validation data, uploading to S3 so that SageMaker can mount our data to the instance at runtime."
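Each line of the manifests written by `download_an4` (called in the next cell) is a single JSON object with the fields produced by `build_manifest` in `nemo/utils/notebook_utils.py`; because we set `train_mount_dir`/`test_mount_dir`, `audio_filepath` already points at the SageMaker mount. An illustrative entry follows; the duration and transcript are made up, the real values come from the AN4 data:

```python
import json

# Illustrative manifest line; the field names match build_manifest in nemo/utils/notebook_utils.py.
entry = {
    "audio_filepath": "/opt/ml/input/data/training/an4/wav/an4_clstk/fash/cen4-fash-b.wav",
    "duration": 2.1,  # made-up value; the real duration is measured with sox
    "text": "go to the station",  # made-up transcript
}
print(json.dumps(entry))  # train_manifest.json holds one such JSON object per line
```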
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "e1c5a60a", + "metadata": {}, + "outputs": [], + "source": [ + "# within the SageMaker container, mount_dir will be where our data is stored.\n", + "download_an4(\n", + " data_dir=str(data_dir),\n", + " train_mount_dir=\"/opt/ml/input/data/training/\",\n", + " test_mount_dir=\"/opt/ml/input/data/testing/\",\n", + ")\n", + "\n", + "# Upload to the default bucket\n", + "prefix = \"an4\"\n", + "bucket = sess.default_bucket()\n", + "loc = sess.upload_data(path=str(data_dir), bucket=bucket, key_prefix=prefix)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "b1de1089", + "metadata": {}, + "outputs": [], + "source": [ + "output_path = \"s3://\" + sess.default_bucket() + \"/nemo-output/\"" + ] + }, + { + "cell_type": "markdown", + "id": "6321e3a9", + "metadata": {}, + "source": [ + "### 4. Configure the training job\n", + "\n", + "Now we configure the training job, by modifying the `config.yaml` file that is stored in our source code directory.\n", + "We pass relative directory paths for the data based on the SageMaker mount directory on the remote instance." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "4bb61640", + "metadata": {}, + "outputs": [], + "source": [ + "conf = OmegaConf.load(config_path)\n", + "\n", + "# Set Data Locations based on the mounted directory in the SageMaker instance\n", + "conf.model.train_ds.manifest_filepath = \"/opt/ml/input/data/training/an4/train_manifest.json\"\n", + "conf.model.validation_ds.manifest_filepath = \"/opt/ml/input/data/testing/an4/test_manifest.json\"\n", + "conf.trainer.accelerator = \"gpu\"\n", + "conf.trainer.max_epochs = 150\n", + "\n", + "# Output directory for our experiment within the SageMaker instance\n", + "conf.exp_manager.exp_dir=\"/opt/ml/model/\"\n", + "\n", + "# Create a Small Variant of the Conformer Model\n", + "conf.model.encoder.n_layers = 8\n", + "conf.model.n_heads = 4\n", + "conf.model.spec_augment.time_masks = 5\n", + "\n", + "# Set Optimizer parameters\n", + "conf.model.optim.lr = 2.0 # by default we using Noam scheduling, the LR is a multiplier \n", + "\n", + "OmegaConf.save(conf, config_dir / 'config.yaml')" + ] + }, + { + "cell_type": "markdown", + "id": "959da702", + "metadata": {}, + "source": [ + "### 5. Run training on SageMaker\n", + "\n", + "Pass the path of the training and validation data on S3 + the output directory on S3 to the PyTorch estimator, and call fit with the appropriate bucket locations for the training and testing data." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "1d2e44e3", + "metadata": {}, + "outputs": [], + "source": [ + "channels = {\"training\": loc, \"testing\": loc}\n", + "\n", + "role = get_execution_role()\n", + "\n", + "# Set to True to enable SageMaker to run locally\n", + "local_mode = False\n", + "\n", + "if local_mode:\n", + " instance_type = \"local_gpu\"\n", + "else:\n", + " instance_type = \"ml.p3.2xlarge\"\n", + "\n", + "est = PyTorch(\n", + " entry_point=\"speech_to_text_ctc.py\", # the script we want to run\n", + " source_dir=str(code_dir), # where our conf/script is\n", + " role=role,\n", + " instance_type=instance_type,\n", + " instance_count=1,\n", + " framework_version=\"1.12.0\", # version of PyTorch\n", + " py_version=\"py38\",\n", + " volume_size=250,\n", + " output_path=output_path,\n", + " hyperparameters={'config-path': 'conf'},\n", + ")\n", + "\n", + "est.fit(inputs=channels)" + ] + }, + { + "cell_type": "markdown", + "id": "2be67b8e", + "metadata": {}, + "source": [ + "### 6. Download model, (Optional) Tensorboard Logs\n", + "\n", + "SageMaker stores our models/logs within a tar file after training has finished. These can be obtained from S3 like below.\n", + "\n", + "We also visualize the training logs. We suggest using an external logger (such as W&B) to track training progress during the run." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "81cd58d3", + "metadata": {}, + "outputs": [], + "source": [ + "key = est.model_data.replace(\"s3://\" + sess.default_bucket() + '/', '')\n", + "\n", + "sess.boto_session.client(\"s3\", region_name=sess.boto_region_name).download_file(\n", + " Bucket=sess.default_bucket(), Key=key, Filename='model.tar.gz',\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "656d53f3", + "metadata": {}, + "outputs": [], + "source": [ + "!tar -xvzf model.tar.gz" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "62018b6e", + "metadata": {}, + "outputs": [], + "source": [ + "%tensorboard --logdir ./ --host 0.0.0.0" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.9.12" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/tutorials/cloud/aws/images/sagemaker_benchmark.png b/tutorials/cloud/aws/images/sagemaker_benchmark.png new file mode 100644 index 000000000000..1d2bf6a5a259 Binary files /dev/null and b/tutorials/cloud/aws/images/sagemaker_benchmark.png differ
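Once `model.tar.gz` has been extracted (the final cells of either notebook), the fine-tuned checkpoint can be restored in NeMo. A minimal sketch, assuming the default `exp_manager` output layout inside the archive (the exact subdirectory depends on the experiment name and timestamp); use `EncDecCTCModelBPE` for the Esperanto fine-tuning tutorial:

```python
import glob

from nemo.collections.asr.models import EncDecCTCModel  # EncDecCTCModelBPE for the BPE-based fine-tuning run

# exp_manager wrote to /opt/ml/model/ on the instance, and SageMaker packaged that directory into
# model.tar.gz; after extraction the .nemo checkpoint usually sits under <exp_name>/<run>/checkpoints/.
checkpoints = glob.glob("**/checkpoints/*.nemo", recursive=True)
asr_model = EncDecCTCModel.restore_from(checkpoints[0])
print(asr_model.cfg)  # inspect the restored model's configuration
```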