From 927b21d1cb298803f4fcb97772f0d2d0af5046d6 Mon Sep 17 00:00:00 2001
From: krishung5
Date: Wed, 6 Mar 2024 02:19:44 -0800
Subject: [PATCH] Remove $

---
 examples/auto_complete/README.md | 10 +++---
 examples/bls/README.md           | 38 +++++++++++-----------
 examples/decoupled/README.md     | 14 ++++----
 examples/jax/README.md           | 24 +++++++-------
 examples/preprocessing/README.md | 56 ++++++++++++++++++++++++--------
 inferentia/README.md             | 24 +++++++-------
 6 files changed, 97 insertions(+), 69 deletions(-)

diff --git a/examples/auto_complete/README.md b/examples/auto_complete/README.md
index f530da3a..b07e065c 100644
--- a/examples/auto_complete/README.md
+++ b/examples/auto_complete/README.md
@@ -1,5 +1,5 @@

diff --git a/examples/preprocessing/README.md b/examples/preprocessing/README.md

# **Preprocessing Using Python Backend Example**

This example shows how to preprocess your inputs using the Python backend before they are passed to the TensorRT model for inference. This ensemble model includes an image preprocessing model (preprocess) and a TensorRT model (resnet50_trt) to do inference.

@@ -5,39 +33,39 @@ This example shows how to preprocess your inputs using Python backend before it

Run onnx_exporter.py to convert the ResNet50 PyTorch model to ONNX format. The width and height dims are fixed at 224, but dynamic axes arguments are used for dynamic batching. The commands in subsections 2 and 3 should be executed within this Docker container.

- $ docker run -it --gpus=all -v $(pwd):/workspace nvcr.io/nvidia/pytorch:xx.yy-py3 bash
- $ pip install numpy pillow torchvision
- $ python onnx_exporter.py --save model.onnx
+ docker run -it --gpus=all -v $(pwd):/workspace nvcr.io/nvidia/pytorch:xx.yy-py3 bash
+ pip install numpy pillow torchvision
+ python onnx_exporter.py --save model.onnx

**2. Create the model repository:**

- $ mkdir -p model_repository/ensemble_python_resnet50/1
- $ mkdir -p model_repository/preprocess/1
- $ mkdir -p model_repository/resnet50_trt/1
+ mkdir -p model_repository/ensemble_python_resnet50/1
+ mkdir -p model_repository/preprocess/1
+ mkdir -p model_repository/resnet50_trt/1

# Copy the Python model
- $ cp model.py model_repository/preprocess/1
+ cp model.py model_repository/preprocess/1

**3. Build a TensorRT engine for the ONNX model**

Set the --fp16 argument to enable fp16 precision. To enable dynamic shapes, use --minShapes, --optShapes, and --maxShapes with --explicitBatch:

- $ trtexec --onnx=model.onnx --saveEngine=./model_repository/resnet50_trt/1/model.plan --explicitBatch --minShapes=input:1x3x224x224 --optShapes=input:1x3x224x224 --maxShapes=input:256x3x224x224 --fp16
+ trtexec --onnx=model.onnx --saveEngine=./model_repository/resnet50_trt/1/model.plan --explicitBatch --minShapes=input:1x3x224x224 --optShapes=input:1x3x224x224 --maxShapes=input:256x3x224x224 --fp16

**4. Run the command below to start the server container:**

Under python_backend/examples/preprocessing, run this command to start the server Docker container:

- $ docker run --gpus=all -it --rm -p8000:8000 -p8001:8001 -p8002:8002 -v$(pwd):/workspace/ -v/$(pwd)/model_repository:/models nvcr.io/nvidia/tritonserver:xx.yy-py3 bash
- $ pip install numpy pillow torchvision
- $ tritonserver --model-repository=/models
+ docker run --gpus=all -it --rm -p8000:8000 -p8001:8001 -p8002:8002 -v$(pwd):/workspace/ -v/$(pwd)/model_repository:/models nvcr.io/nvidia/tritonserver:xx.yy-py3 bash
+ pip install numpy pillow torchvision
+ tritonserver --model-repository=/models
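With the server up, the `preprocess` model copied in step 2 is served by the Python backend alongside the TensorRT engine. Purely as an illustration of what such a python_backend preprocessing model looks like, here is a minimal sketch; it is not the example's actual model.py, and the tensor names (`INPUT_0`/`OUTPUT_0`), dtypes, and transforms are assumptions:

```python
# Illustrative sketch of a python_backend preprocessing model; NOT the
# example's model.py. Tensor names, dtypes, and transforms are assumptions.
import io

import numpy as np
from PIL import Image
from torchvision import transforms
import triton_python_backend_utils as pb_utils


class TritonPythonModel:
    def initialize(self, args):
        # Standard ImageNet preprocessing; the real example may differ.
        self.transform = transforms.Compose([
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                 std=[0.229, 0.224, 0.225]),
        ])

    def execute(self, requests):
        responses = []
        for request in requests:
            # Assumed input: the raw encoded image bytes as a UINT8 tensor.
            raw = pb_utils.get_input_tensor_by_name(request, "INPUT_0").as_numpy()
            image = Image.open(io.BytesIO(raw.tobytes())).convert("RGB")
            pixels = self.transform(image).numpy().astype(np.float32)
            output = pb_utils.Tensor("OUTPUT_0", pixels)
            responses.append(pb_utils.InferenceResponse(output_tensors=[output]))
        return responses
```

In the real example, the ensemble configuration ties the preprocessing output to the resnet50_trt input, so the client sends requests to ensemble_python_resnet50 rather than to the individual models.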
**5. Start the client to test:**

Under python_backend/examples/preprocessing, run the commands below to start the client Docker container:

- $ wget https://raw.githubusercontent.com/triton-inference-server/server/main/qa/images/mug.jpg -O "mug.jpg"
- $ docker run --rm --net=host -v $(pwd):/workspace/ nvcr.io/nvidia/tritonserver:xx.yy-py3-sdk python client.py --image mug.jpg
- $ The result of classification is:COFFEE MUG
+ wget https://raw.githubusercontent.com/triton-inference-server/server/main/qa/images/mug.jpg -O "mug.jpg"
+ docker run --rm --net=host -v $(pwd):/workspace/ nvcr.io/nvidia/tritonserver:xx.yy-py3-sdk python client.py --image mug.jpg
+ The result of classification is:COFFEE MUG

Here, we input an image of a mug and the inference result is "COFFEE MUG", which is correct.
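To see roughly what client.py does under the hood, here is a minimal sketch using the tritonclient HTTP API. It is not the repository's actual client, and the input/output tensor names and dtypes are assumptions that would have to match the ensemble's config.pbtxt:

```python
# Illustrative sketch of a client request to the ensemble; NOT the example's
# client.py. Tensor names ("INPUT_0"/"OUTPUT_0") and dtypes are assumptions.
import numpy as np
import tritonclient.http as httpclient

client = httpclient.InferenceServerClient(url="localhost:8000")

# Send the raw encoded JPEG bytes; the ensemble's preprocess model decodes them.
with open("mug.jpg", "rb") as f:
    image_bytes = np.frombuffer(f.read(), dtype=np.uint8)

infer_input = httpclient.InferInput("INPUT_0", [image_bytes.size], "UINT8")
infer_input.set_data_from_numpy(image_bytes)

result = client.infer(
    model_name="ensemble_python_resnet50",
    inputs=[infer_input],
    outputs=[httpclient.InferRequestedOutput("OUTPUT_0")],
)
scores = result.as_numpy("OUTPUT_0")
print("Top-1 class index:", int(np.argmax(scores)))
```

The actual client.py additionally resolves the predicted class to a human-readable label, which is how the "COFFEE MUG" string above is produced.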
diff --git a/inferentia/README.md b/inferentia/README.md
index 6a90740d..381c8ed8 100644
--- a/inferentia/README.md
+++ b/inferentia/README.md
@@ -60,18 +60,18 @@ or simply clone with https.
Clone this repo from GitHub into the home directory `/home/ubuntu`.
```
- $chmod 777 /home/ubuntu/python_backend/inferentia/scripts/setup-pre-container.sh
- $sudo /home/ubuntu/python_backend/inferentia/scripts/setup-pre-container.sh
+ chmod 777 /home/ubuntu/python_backend/inferentia/scripts/setup-pre-container.sh
+ sudo /home/ubuntu/python_backend/inferentia/scripts/setup-pre-container.sh
```
Then, start the Triton instance with:
```
- $docker run --device /dev/neuron0 -v /home/ubuntu/python_backend:/home/ubuntu/python_backend -v /lib/udev:/mylib/udev --shm-size=1g --ulimit memlock=-1 -p 8000:8000 -p 8001:8001 -p 8002:8002 --ulimit stack=67108864 -ti nvcr.io/nvidia/tritonserver:-py3
+ docker run --device /dev/neuron0 -v /home/ubuntu/python_backend:/home/ubuntu/python_backend -v /lib/udev:/mylib/udev --shm-size=1g --ulimit memlock=-1 -p 8000:8000 -p 8001:8001 -p 8002:8002 --ulimit stack=67108864 -ti nvcr.io/nvidia/tritonserver:-py3
```
Note 1: The user needs to list every Neuron device to be used during container initialization. For example, to use 4 Neuron devices on an instance, the user would need to run with:
```
- $docker run --device /dev/neuron0 --device /dev/neuron1 --device /dev/neuron2 --device /dev/neuron3 ...`
+ docker run --device /dev/neuron0 --device /dev/neuron1 --device /dev/neuron2 --device /dev/neuron3 ...`
```
Note 2: `/mylib/udev` is used for Neuron parameter passing.
@@ -81,7 +81,7 @@ Note 3: For Triton container version xx.yy, please refer to
After starting the Triton container, go into the `python_backend` folder and run the setup script.
```
- $source /home/ubuntu/python_backend/inferentia/scripts/setup.sh
+ source /home/ubuntu/python_backend/inferentia/scripts/setup.sh
```
This script will:
1. Install necessary dependencies
@@ -118,7 +118,7 @@ triton python model directory.
An example invocation of `gen_triton_model.py` for a PyTorch model can look like:
```
- $python3 inferentia/scripts/gen_triton_model.py --model_type pytorch --triton_input INPUT__0,INT64,4x384 INPUT__1,INT64,4x384 INPUT__2,INT64,4x384 --triton_output OUTPUT__0,INT64,4x384 OUTPUT__1,INT64,4x384 --compiled_model /home/ubuntu/bert_large_mlperf_neuron_hack_bs1_dynamic.pt --neuron_core_range 0:3 --triton_model_dir bert-large-mlperf-bs1x4
+ python3 inferentia/scripts/gen_triton_model.py --model_type pytorch --triton_input INPUT__0,INT64,4x384 INPUT__1,INT64,4x384 INPUT__2,INT64,4x384 --triton_output OUTPUT__0,INT64,4x384 OUTPUT__1,INT64,4x384 --compiled_model /home/ubuntu/bert_large_mlperf_neuron_hack_bs1_dynamic.pt --neuron_core_range 0:3 --triton_model_dir bert-large-mlperf-bs1x4
```
In order for the script to treat the compiled model as TorchScript
@@ -161,7 +161,7 @@ script to generate triton python model directory.
An example invocation of `gen_triton_model.py` for a TensorFlow model can look like:
```
- $python3 gen_triton_model.py --model_type tensorflow --compiled_model /home/ubuntu/inferentia-poc-2.0/scripts-rn50-tf-native/resnet50_mlperf_opt_fp16_compiled_b5_nc1/1 --neuron_core_range 0:3 --triton_model_dir rn50-1neuroncores-bs1x1
+ python3 gen_triton_model.py --model_type tensorflow --compiled_model /home/ubuntu/inferentia-poc-2.0/scripts-rn50-tf-native/resnet50_mlperf_opt_fp16_compiled_b5_nc1/1 --neuron_core_range 0:3 --triton_model_dir rn50-1neuroncores-bs1x1
```
NOTE: Unlike TorchScript model, TensorFlow SavedModel stores sufficient
@@ -215,7 +215,7 @@ a valid torchscript file or tensorflow savedmodel.
Now, the server can be launched with the model as below:
```
- $tritonserver --model-repository
+ tritonserver --model-repository
```
Note:
@@ -255,7 +255,7 @@ contains the necessary files to set up testing with a simple add_sub model. The
test requires an instance with more than 8 inferentia cores to run, e.g. `inf1.6xlarge`. To start the test, run
```
- $source /python_backend/inferentia/qa/setup_test_enviroment_and_test.sh
+ source /python_backend/inferentia/qa/setup_test_enviroment_and_test.sh
```
where `` is usually `/home/ubuntu/`. This script will pull the [server repo](https://github.com/triton-inference-server/server)
@@ -265,7 +265,7 @@ Triton Server and Triton SDK.
Note: If you need to change some of the tests in the server repo, you need to run
```
- $export TRITON_SERVER_REPO_TAG=
+ export TRITON_SERVER_REPO_TAG=
```
before running the script.
@@ -273,8 +273,8 @@ before running the script.
## pytorch-neuronx and tensorflow-neuronx
1. Similar to the steps for inf1, change the argument to the pre-container and on-container setup scripts to include the `-inf2` or `-trn1` flags, e.g.,
```
- $chmod 777 /home/ubuntu/python_backend/inferentia/scripts/setup-pre-container.sh
- $sudo /home/ubuntu/python_backend/inferentia/scripts/setup-pre-container.sh -inf2
+ chmod 777 /home/ubuntu/python_backend/inferentia/scripts/setup-pre-container.sh
+ sudo /home/ubuntu/python_backend/inferentia/scripts/setup-pre-container.sh -inf2
```
2. On the container, following the `docker run` command, you can pass a similar argument to the setup.sh script. For PyTorch: