diff --git a/.azure-pipelines/scripts/codeScan/codespell/inc_dict.txt b/.azure-pipelines/scripts/codeScan/codespell/inc_dict.txt new file mode 100644 index 00000000000..91e466f168a --- /dev/null +++ b/.azure-pipelines/scripts/codeScan/codespell/inc_dict.txt @@ -0,0 +1,14 @@ +activ +ans +datas +ende +lates +masia +mutli +nd +ot +rouge +te +tne +ue +womens diff --git a/.azure-pipelines/scripts/codeScan/pyspelling/inc_dict.txt b/.azure-pipelines/scripts/codeScan/pyspelling/inc_dict.txt deleted file mode 100644 index b5b7e7e2bea..00000000000 --- a/.azure-pipelines/scripts/codeScan/pyspelling/inc_dict.txt +++ /dev/null @@ -1,2734 +0,0 @@ -ABI -ACDC -ADDR -ADE -AGS -ALLREDUCE -AMD -AMX -APIs -APl -APm -APs -ASPLOS -AVX -AWQ -AWS -AWSSageMakerSupport -Abc -AbcAdaptor -AbcTuneStrategy -Acc -AccuracyCriterion -AccuracyLoss -Acknowledgement -Adadelta -AdamW -Adaptor -AddEmbeddings -AddN -AddV -AdvProp -Affine -Ajanthan -AlexNet -Alibaba -AlignImageChannel -Alireza -Aman -Amodei -AmpConf -Analytics -Anastasiia -AnchorGenerator -ArcFace -ArgMax -Arial -AttentionReshape -AttributeProto -AutoINC -AutoMixPrecision -AutoModelForSequenceClassification -AutoQuant -AutoTokenizer -Autoregressive -AverageMeter -AvgPool -BERT's -BERTDataSet -BFP -BGR -BLAS -BLEU -BNInception -BV -BasicNAS -BasicTokenizer -BatchMatMul -BatchMatMulV -BatchNorm -BayesianOptimization -BenchmarkConf -BenchmarkConfig -Benchmarking -Bengio -Benoît -BertAdam -BertConfig -BertForMaskedLM -BertForNextSentencePrediction -BertForPreTraining -BertForQuestionAnswering -BertForSequenceClassification -BertForTokenClassification -BertModel -BertTokenizer -BiDAF -BiLSTM -Bianchi -BiasAdd -BibTeX -BigDL -BigDLNanoSupport -BiliBili -BilinearImagenet -BinaryAdd -Biomedical -BladeDISC -BlendCNN -BlendCnn -BoW -Boudoukh -BoxList -BraTS -BrainTumour -Btg -Builtin -CCE -CCFF -CCL -CERN's -CFLAGS -CHANGELOG -CHZ -CIFAR -CLA -CLI -CLM -CLX -CMAKE -CMU -CMake -CMakeLists -CNWXA -COCODataset -COCODemo -COCOEval -COCONpy -COCORaw -COCORecord -COCOmAP -COCOmAPv -CONDA -CPUExecutionProvider -CPUs -CPX -CPz -CTRLModel -CTRLTokenizer -CUDAExecutionProvider -CUDAToolKit -CUHK -CVF -CXX -CYP -Cadene -Caffe -CaffeNet -CaffeResNet -CaffeResnet -Caffenet -Caiming -CamemBERT -Carbonell -CascadeFullRes -CentOS -CenterCrop -Centre -Cesa -ChamNet -Changelog -Chatbot -Chaumond -Cheng -Cho's -Chongruo -Chuanqi -Cistac -Cityscapes -ClassPredictionTower -Clergerie -CoCo -CoLA -CoNLL -CodeGenerator -CodeXGLUE -ColorJitter -CompVis -ConcatV -CondConv -Condensenet -Config -Conneau -ConstDataLoader -ConstantOfShape -Conv -ConvBNReLU -ConvNets -ConvPerStage -ConvReLU -ConvertingSSDMobilenetToONNX -Convolutional -CoreML -Criteo -CriteoTerabyte -CropResize -CropToBoundingBox -CrossEntropyLoss -Curran -CustomDataset -CustomObj -CvAClvFfyA -DBMDZ -DCMAKE -DDP -DDR -DENABLE -DETR -DFS -DFabiansResUNet -DGAN -DKFZ -DLRM's -DMQA -DNNL -DPNs -DUC -DUNet -DUnetCNN -DagnyT -Dai -Danqi -Darknet -DataLoader -DataLoadermodule -DataParallel -DataProcessor -Dataset -DatasetAnalyzer -Dathathri -Dbzg -DeBERTa -DeQuantStub -DeQuantize -DecodeImage -DeepLab -DeepLabV -DeepLearningExamples -Delangue -DenseNet -DepthwiseConv -DequantStub -DequantizeLinear -Detectron -Dettmers -DevCloud -Devlin -DialoGPT -Discrim -Distil -DistilBERT -DistilBert -DistilBertModel -DistilBertTokenizer -DistilGPT -DistilRoBERTa -Distillated -DistillationConf -DistillationConfig -DistilmBERT -DistributedDataParallel -DistributedOptimizer -DistributedSampler -Djamé -DnnlExecutionProvider -Dockerfile -DropOut -DualPathNet 
-DualPathNetworks -DummyDataLoader -Dupont -Durand -DyNAS -DyNas -EAQkaohzrJbd -EMC -EdgeTPU -Edouard -EfficientNet -EfficientNets -EleutherAI -EmbedLayerNormalization -EncodeJped -Erf -Errno -ExperimentPlanner -ExperimentPlanners -Extensibility -EzjbRL -FAC -FBNet -FBResNet -FCN -FERPlus -FFFFFF -FLOPs -FP -FPN -FQqOuW -FRN -FUNSD -FWK -FabiansUNet -FaceBook -FakeQuant -Farhadi -FashionMNIST -FastFormers -FasterRCNN -FeatureExtractor -Feng -FileNotFoundError -Finetune -FlatMapDataset -FlauBERT -Flavours -FloatFunctional -FloatTensor -Florian -FrameworkModel -Frantar -FromConfig -Fu -Funtowicz -FusedConv -GANs -GCP -GEMM -GFLOPs -GLIBCXX -GLOG -GLUE -GN -GPG -GPTJ -GPTQ -GPUs -GQm -Galata -GameAI -Garnett -Gelu -GeluOperator -GenEfficientNet -GenericPreprocessor -GermEval -GetStrides -Gimpel -Girshick -GitHub -GlobalAveragePool -Gluon -GluonCV -GoogleNet -Governers -Goyal -GraphDef -GraphModule -GraphModules -GraphProto -Grauman -Graviton -Guangxuan -Gui -Guillaume -Guoming -HAWQ -HBM -HOROVOD -HPO -HTnwXegLGNAtw -HWs -HYPJUDY -Haibin -Haihao -Hanwen -HasAns -HdQ -Hein -Hellaswag -HelloWorld -Hippocampus -HistogramObserver -Horovod -Hounsfield -HqEgzS -Hu -HuBERT -HuggingFace -HuggingFace's -HuggingFacesTS -HybirdBlock -HybridBlock -IC -ICCV -IEEE -ILSVR -ILSVRC -IML -IMS -INTRA -IOMP -IPEX -IRQ -ISA -Icelake -Ilya -ImageFolder -ImageList -ImageNet -ImageRecord -ImageRecordIter -ImagenetRaw -ImportError -InceptionResNetV -InceptionResnetV -InceptionV -IndexType -InnerProduct -InputData -InputExample -InputFile -Inria -IntelAI -IntelCaffe -IntelDevTools -IntelNeuralCompressor -IntelON -IntermediateLayersKnowledgeDistillationLossConfig -InvertedResidual -IoU -Isensee -IssueQuery -IssueQueryThreads -IteratorGetNext -JIRA -JPEGImages -Javascript -Jens -Ji -Jie -Jingfei -Jiong -Joshi -Julien -JunWang -JupyterLab -JupyterLab's -Jäger -KH -KMP -KaimingHe -Karthik -Keskar -Keypoint -KnowledgeDistillationLoss -KnowledgeDistillationLossConfig -Kullback -Kyunghyun -LLM -LLMs -LMHeadModel -LOADGEN -LOC -LOCderiv -LOCpart -LOGLEVEL -LPOT -LPOT's -LSVRC -LTS -LaTeX -LabelBalance -LabelShift -Lample -Lan -LanguageModeling -Lapata -Larey -Larochelle -LastLayerShape -Lavin -LayerNorm -LayoutLM -LayoutLMv -LeakyRelu -Lecun -Leibler -LessEqual -LiTS -LibriSpeech -LinkedIn -ListDataset -Liu -Liu's -LoadGen -LoadGen's -LoadImage -LoadgenAPI -LoadgenAPITestSettings -LoadgenVersion -LogSettings -LokuUdeVg -Louf -LowPrecisionInferenceTool -Lp -Luan -Lysandre -MACOSX -MALLOC -MAdds -MICCAI -MKL -MLAS -MLPerf -MLefficiency -MLperf -MMLAB -MNASNet -MNIST -MNLI -MRPC -MSD -MSE -MSELoss -MSFT -MSR -MULTISCALE -MXNet -MYTASK -MYTASKNAME -Madotto -MagnitudePrunePolicy -Maier -MakeIterator -Makefile -MakefileGnProj -Mandar -Manmatha -Mapillary -MarkDown -MaskPostProcessor -MaskRCNN -MaskRCNNFPNFeatureExtractor -Massa -MatMul -MatMulInteger -MatMulIntegerToFloat -MatMulWithBias -MatMulWithBiasAdd -MatMulWithBiasGelu -MatMulWithBiasTanh -MaxPool -McCann -McKinstry -MeanSquaredError -Medcial -Migacz -MinMax -MinMaxObserver -Mingda -MiniLM -Mirella -Mish -MixNet -MixedConv -MixedPrecision -MixedPrecisionConfig -MobileBERT -MobileNet -MobileNetV -MobileNetv -MobilenetV -Mobilenetv -Modalities -ModelConversion -ModelProto -ModelSize -ModelTC -ModelZoo -ModuleDict -ModuleNotFoundError -Molino -Moshe -Multi -MultiNLI -MultiStream -MultiStream's -MultiStreamFree -MxNet -MyDataset -MyLearning -MyMetric -Mykhailo -Myle -NAS -NASBase -NASConfig -NASNet -NCCL -NCHW -NDArray -NER -NGPUS -NHWC -NIC -NLG -NLP -NLPToolkit -NLU -NNZ -NPM 
-NUMA -NVAITC -NVIDIA -NVIDIA's -NVidia -Naman -Namhoon -Nano -Narasimhan -NeelNanda -NetEase -Netron -NeurIPS -NeuralCompressor -NewDataloader -NewMetric -NextPlatform -Nezha -Nijmegen -Nitish -NoNormalization -NodeJS -NoisyStudent -NonNestedTuple -NonZero -Nonlinearity -NormalFloat -Nsh -Ntsk -Nx -NxM -OC -OMP -ONNX -ONNXCommunityMeetup -ONNXConfig -ONNXQuantizer -ONNXRT -ONNXRTAdaptor -OOM -OOQtYMH -OPs -ORGderiv -ORGpart -OTH -OTHderiv -OTHpart -OaaS -Ofir -Omer -OnnxRuntime -OpenAI -OpenAI's -OpenAIAdam -OpenAIGPTModel -OpenAIGPTTokenizer -OpenCV -OpenMP -OpenVINO -OpenWebTextCorpus -OperatorConfig -Optimizations -Optimizers -OrderedDict -Ott -Outlier -OutputData -PERderiv -PERpart -PIL -PLM -PLg -PNASNet -POC -PPLM -PQ -PR -PRETAINED -PRs -PTQ -PWC -PWD -PWDEBUG -PYTHONPATH -PZ -PaddingSequence -Panoptic -Parinov -ParseDecodeImagenet -ParseDecodeVoc -Parzen -Peason -PeleeNet -Penghui -Pengxin -PerChannelMinMaxObserver -PhYUmn -Piero -Pierric -Piqa -Piyush -PolyNet -Ponte -Pooler -PostPostTrainingQuantConfig -PostProcessor -PostTrainingQuantConfig -PostTransform -PowerTools -PreSumm -Prec -Preload -Preloading -Preprocessor -PreprocessorFor -Preprocessors -ProgressBar -Protobuf -PrunePolicy -Pruning's -PruningConf -PubTables -PyObject -PyPI -PyPi -PySUT -PyTorch -PyTorchKnowledgeDistillationLoss -PyYAML -PythonAPI -PythonLauncher -QAT -QConfig -QDQ -QIntegerops -QLinear -QLinearOps -QLinearOpsAdaptor -QLinearops -QLoRA -QNLI -QOperator -QPS -QQP -QSL -QiaoranC -Qlora -QnA -QuaLA -QuantConf -QuantStub -Quantizable -Quantization -QuantizationAwareTrainingConfig -QuantizeLinear -QuantizedConv -QuantizedConvReLU -QuantizedInput -Quantizes -QueryBackendCapability -QuerySampleComplete -QuerySampleLibrary -QuickStart -Quickstart -Quoc -R'emi -README -RESTful -RFB -RGB -RMSE -RNN -ROC -RPN -RPNHead -RPNPostProcessor -RTN -RTX -Radboud -Radford -Radu -RandAug -RandAugment -RandomCrop -RandomHorizontalFlip -RandomResizedCrop -RandomVerticalFlip -Rault -ReLU -ReadmeBuild -ReadmeFAQ -ReadmeHtmlDocs -ReadmeTests -Realtime -RecordIO -RecordingObserver -Redmon -ReduceMean -RegNet -Rehm -Releaser -Relu -ResNeSt -ResNeXt -ResNest -ResNet -ResNetV -ResNext -Rescale -ResencUNet -Resize -ResizeCropImagenet -ResizeWithRatio -Resizes -Resnet -ResultMonitor -RetinaMask -RetinaNet -Rewon -RoBERTa -RobertaModel -RobertaTokenizer -RocStories -Romary -Rsqrt -Runtime -RuntimeError -Rusia -Ruslan -SBSTD -SENet -SEP -SGD -SHA -SMBO -SMBOs -SOTA -SPIQ -SPR -SQuAD -SSDMobilenet -SSDSC -SSDSCKKB -STS -SUT -SageMaker -Sagot -Salakhutdinov -Salesforce -Salimans -Sanh -SavedModel -Scalable -ScriptModule -Seddah -SegmentationMask -SelfKnowledgeDistillationLossConfig -Sep -SeqDataCollator -ServerAPP -ServerPool -SettingsPython -Sharma -Shen -Shirish -ShuffleNet -Shufflenet -Shvets -SigOpt -Sigopt -SingleStream -Skylake -Smola -SmoothQuant -SoX -Socher -SocketIO -Soricut -SpaCy -SparseCategoricalAccuracy -SparseCategoricalCrossentropy -SparseLib -Spearman -Sqrt -Squad -SquadF -SquadV -SquaredDifference -SqueezeBERT -SqueezeNet -SrcTuple -StableDiffusionPipeline -Standley -StartTest -Startup -Stoyanov -Subgraph -Submodules -Sumanth -Summarization -SummaryWriter -SuperBench -Supernet -SupportMatrix -Sutskever -Suyue -Suárez -Symlinks -SystemUnderTest -Szymon -TBD -TEQ -TF -TFBertForSequenceClassification -TFRecord -TFRecordDataset -TFRobertaModel -TFSlimNetsFactory -TFSlimNetsFactory's -THCudaTensor -TLS -TODO -TOL -TPE -TPU -TZ -TaskDB -TaskLauncher -TaskMonitor -TaskXX -TaskXXX -TensorBoard -TensorDataset -TensorFlow 
-TensorInfo -TensorProto -TensorRT -TensorflowQATModel -TensorflowQuery -TensorflowTopK -TensorrtExecutionProvider -TestSettings -Thalaiyasingam -Tian -Tidx -TimDettmers -TinyBERT -ToArray -ToBGR -ToNDArray -ToPILImage -ToRange -ToTensor -Tokenizer -Tokenizers -TopK -TorchDynamo -TorchScript -TorchSmoothQuant -TorchVision -Torr -Toutanova -TransfoXLModel -TransfoXLTokenizer -TransformImage -Treebank -TuneStrategy -TuningCriterion -UI -UID -UKERBljNxC -UNet -UTC -UmK -Unet -Upscaled -VCVTNE -VCVTNEPS -VDPBF -VMware -VNNI -VOC -VOCMApMetrics -VOCRecord -VOCdevkit -VOCmAP -VRAM -VSCode -VTune -ValueError -Vanhoucke -Varshney -Vecchio -Veronika -Veselin -ViT -Villemonte -WARMUPS -WIDERFACE -WIP -WLYDCRB -WOQ -Wallach -Wasserblat -WeChat -WebSocket -WebSockets -WebText -Webcam -WeightOnlyLinear -WeightPruningConfig -WeightSharedConvolutionalBoxPredictor -Wformat -WideResNet -Wightman -WikiText -WilsonCity -Winogrande -WnD -Wnxu -WordPiece -Workflow -XKeyboard -XLA -XLMModel -XLMTokenizer -XLNet -XLNetModel -XLNetTokenizer -XNLI -XXXX -Xbyak -Xception -Xdiag -Xeon -Xiang -Xiao -Xiong -Xiuying -XlUH -YADWOFuj -YKd -YOLOV -YOLOv -YY -YagFgODM -Yan -Yi -Yiming -Yinhan -Yoann -Yosinski -YqgzY -Yuanjun -Yue -Yunpeng -Yurchuk -Yvinec -ZFNet -ZHShareTargetIDMore -Zafrir -ZeroPoint -Zettlemoyer -Zhang -Zhenzhong -Zhi -Zhilin -Zhongyue -Zhu -Zihang -Zptls -aa -aac -aae -aarch -abc -abcadf -abeja -abi -absl -absmax -abspath -abstractive -acc -accuracies -acdc -activations -actorder -adam -adaptor -adaptor's -addr -ade -ae -aea -af -ai -aia -aidevcloud -ailab -al -albert -aldk -alexnet -algo -algos -alibaba -aliblade -allenai -alloc -alsologtostderr -amazonaws -amazonlinux -amd -amerather -amodio -amongst -amsgrad -amx -analytics -andravin -andreamad -anisotropic -anno -anton -ap -apache -api -apis -arXiv -arange -arcface -arcfaceresnet -archs -arg -argmax -argparse -args -arxiv -asd -astype -asym -async -aten -atrous -att -attr -attredirects -attrs -auc -aug -autgrad -autoEnabling -autocast -autogenerate -autograd -autoinc -automixedprecisionmkl -autonumber -autopep -ava -avgloss -avx -awk -awq -aws -azuremarketplace -bCO -backend -backends -backticks -baremetal -bart -barthez -bashrc -basicConfig -batchgenerators -batchnorm -bayesian -bazel -bb -bbbb -bbox -bboxes -bc -bcb -bcc -bccf -bce -bd -bdb -bdf -bdist -beba -benchmarked -benchmarking -berkeleyvision -bert -berts -bertsquad -bfc -bfloat -bicubic -bidaf -bigdl -bigscience -bilibili -bilinear -billsum -binarize -binarized -biomedical -bitnami -bitsandbytes -blendcnn -bleu -blocktime -blockwise -blogpost -bloomz -bmm -bn -bnb -bninception -bobw -bolded -booktitle -bool -boolean -boxlist -br -brgemm -brighly -broadcasted -bs -bsmock -bsnone -bugfix -buildin -builtin -bvlcalexnet -bzip -cadene -caffe -caffenet -cafferesnet -calib -calibrationcsv -calibrationset -camembert -canada -cb -cbica -ccdb -ccedc -ccl -cd -cdb -cded -cdf -cdn -cdot -cdrdv -ce -ceba -cec -ceee -ceil -centerNet -centernet -centos -cern -certfile -cfa -cff -cffi -cfg -cfgs -channelx -chatbot -checkbox -checkboxes -chmod -chongruo -chris -christmas -ci -cifar -circleci -cityscapes -cityscapesScripts -cityscapesscripts -cj -cknowledge -ckpt -ckpts -cla -classDef -classDiagram -clcarwin -cli -clickAuto -clickEnable -clickSetting -clipnorm -clm -cloudblogs -cls -clx -cly -cmake -cmd -cn -cnn -cnt -cocoApi -cocoapi -cocodataset -cocoraw -codalab -codebert -codecogs -codegen -codenamed -codeofconduct -codexglue -colorama -colspan -compat -compilervars -concat -cond -conda -condconv -conf 
-config -configs -confs -connectSSH -const -constexpr -constfold -contrib -conv -convolutional -cooldown -copt -coreml -cp -cpp -cpu -cpufreq -cpus -cpython -creafz -creatis -creativecommons -criteo -criterions -croping -crossvalidaton -crt -csarron -css -csv -ctrl -ctuning -ctx -cuda -cudaPopCallConfiguration -cudatoolkit -cudnn -curr -customised -cv -cwad -cwd -cxxopt -cy -cypw -cython -dNative -dXY -da -dae -daf -dailymail -darknet -dataLoader -databricks -datadir -datafile -dataloader -dataloaders -datapoints -datas -dataset -datasets -datatype -datatypes -dathath -dbaeumer -dbmdz -dbox -dbs -dcn -ddp -de -deberta -decapoda -deeac -deeb -deepcopy -deepengine -deeplab -deeplabv -deeplearning -deepset -denseblock -denselayer -densenet -deps -dequant -dequantization -dequantize -dequantized -desc -descs -dest -destructor -detections -detectron -detr -dev -devcloud -devel -devtool -dfb -dgpu -diag -dialogpt -dicoo -dicts -diffusers -dir -dirname -distil -distilbert -distilgpt -distillated -distillating -distilrobert -distilroberta -distro -distutils -dividiti -dl -dlabel -dlboost -dlrm -dmjx -dmlc -dnf -dnn -dnnl -doclist -docstrings -doctrings -docutils -doesn -doteq -dowmsampling -downloader -downsampled -downsampling -doxygen -dpn -dpr -dq -dropdown -ds -dscore -dst -dtype -dualpathnetworks -duc -dunet -dvdt -dw -dynamiccaly -dynas -eD -ead -eaf -earlystop -eb -ebbce -ec -ece -ecotrust -edgetpu -edu -ee -eec -eer -ef -efb -efe -efficientNet -efficientnet -eg -eightbit -einstein -el -electra -elif -eltwise -emCgSTlJaAg -ema -emb -embeddings -embs -emsp -enableHistory -enablerepo -ende -enfr -eng -english -ensembling -ensp -entrypoint -enum -env -environ -ep -eps -eq -erf -eriklindernoren -eslint -esri -et -etection -eval -evals -evaluator -evel -exdb -exemplarily -existing -expanduser -extensibility -extmanager -extractive -f'scale -fPIC -fabian -facebook -facebookresearch -fafdcd -fairseq -fallbacks -fanout -faq -fastrcnn -fatihcakirs -favourably -fb -fba -fbgemm -fbnetc -fbresnet -fc -fcn -fd -fdb -fdbf -fe -feedbacks -feedstock -ferplus -ffc -filename -filenames -filepath -filesystem -finbert -finetune -finetuned -finetuning -flac -flaubert -flavour -flavours -floatfunctional -fmfn -fmt -fmtstr -fn -fname -fns -folj -foregound -fp -fpic -fpn -frac -frontend -fstack -ftfy -fullres -func -functionalities -functionet -functools -funsd -fvcore -fw -fx -gRPC -galata -gcc -gchhablani -gclient -gcp -gd -geffnet -gelu -gemm -geomean -german -germeval -gestaltit -getitem -getsize -gh -gid -gif -github -githubusercontent -gitmodules -gloo -gluebenchmark -gluepy -gluon -gluoncv -gluonnlp -glx -gn -gnq -goldsborough -goog -google -googleapis -googleblog -googlenet -googlesource -gpb -gpg -gpt -gptq -gpu -gpus -grafftti -graphDef -graphdef -graphsage -grappler -grey -groupnorm -grpc -gtFile -gtFine -gtest -gtests -gui -gz -gzY -haibinlin -haiz -hangzhang -hardcoding -hardswish -hardtanh -hardwares -hawq -heatmaps -helloworld -henson -hiddenlayer -hippocampus -historyDetail -hlu -horovod -horovodrun -hostfile -hostname -howey -howpublished -hpo -hpp -href -html -htmlLabels -http -https -huawei -hubert -huggingface -hujie -hvd -hyp -hyperparameter -hyperparameters -hypotype -iLit -iOS -ia -ibean -icc -icpc -icx -ide -idx -ie -ieee -ieeexplore -iire -ilsvrc -im -imagecocodataset -imagenet -imagesTr -imagesTs -img -imgrec -imgs -imgx -impl -inceptionresnetv -inceptionv -incollection -indexValue -indices -indico -inferenceoptimizer -inferencer -informations -infos -init -initializer -innersource -inp 
-inplace -inproceedings -inputcsv -insa -instancenorm -instanceonly -instantiation -integerops -intel -intelai -inteltensorflow -interoperability -intra -intrinsics -introudces -ints -inturn -io -ios -iou -ip -ipc -ipex -ipynb -ipynbrun -ipython -ir -irv -isinstance -issuecomment -itemName -itemStyle -iter -iterable -iters -itex -iz -jJA -japanese -jemalloc -jestjs -jim -jit -jitter -jlpm -jpeg -jpg -jpserver -jpwarren -js -json -jsonl -jsons -jupyter -jupyterlab -kaggle -kaggleAdDisplayChallenge -kai -kaiming -kawapanion -kcho -kd -keepachangelog -keepbs -keepdim -ker -keras -kern -keyfile -keypoint -kimiyoung -kitti -kmp -kriz -kwargs -lAtr -lS -labelsTr -labextension -labextensions -lang -latencies -lates -layernorm -layoutlm -layoutlmft -layoutlmv -ld -leakyrelu -learnable -lecun -leftIcon -leftImg -len -lf -lfaidata -lfs -li -liKE -libGL -libdeep -libengine -libffi -libgl -libglib -libiomp -libjemalloc -libmlperf -librispeech -librosa -libsndfile -libstdc -libz -licence -lin -linkedin -linkopt -linoxide -linux -linuxfoundation -llamanorm -llms -llvmlite -lm -ln -loadgen -localdisk -localhost -logdir -logfile -login -logits -logtostderr -longformer -lossy -lowproposals -lowres -lpot -lr -lscpu -lt -luYBWA -lua -lutzroeder -lvwerra -lyon -lzma -mAP -mAp -mBERT -mBart -mIOU -mIoU -macOS -mailto -mainpage -manylinux -marianmt -masia -maskrcnn -maskrnn -massa -matcher -mathbb -matmul -matplotlib -matricses -matsubara -maxSizeInComplete -maxdepth -maxindrange -maxk -mbart -mbv -mcc -mcordts -md -measurer -medicaldecathlon -meetup -mem -membind -mems -messi -metabuild -metadata -metamind -microsoft -miguelgrinberg -minibatch -minilm -minimalistic -minival -minloglevel -minmax -mins -misalignments -miscs -mixedprecision -mixnet -mixup -mkdir -mkl -mlap -mlas -mlcommons -mlefficiency -mll -mlm -mlp -mlpc -mlperf -mlt -mmdetection -mmlab -mnasnet -mnist -mnli -mobilebert -mobilenet -mobilenetv -modalities -modality -modelfeatures -modelforward -modelinput -modellogits -modelmean -modelsize -modelstd -mosaicml -mose -mpi -mpirun -mpt -mrcnn -mrm -mrpc -mse -msg -msvc -mul -mult -multi -multiclass -multilabel -multinli -multiscale -mutli -mv -mx -mxnet -myModel -mzbert -nSsKchNAySU -namedtuple -nano -nanohanno -nas -nasnet -nasnetalarge -nasnetamobile -nb -nbest -nbias -nbsp -nc -nchw -nd -nda -ndarray -nderlu -ndhwc -neo -neox -nepoch -ner -nervanasystems -nesterov -netflix -netron -networkbuilders -neval -newstest -nextplatform -nezha -nf -nfolds -ng -ngatang -ngram -ni -nielsr -nifti -niftis -nii -nl -nli -nll -nlp -nlpyang -nltk -nm -nms -nn -nnU -nnUNet -nnUNetPlansv -nnUNetTrainer -nnUNetTrainerV -nnUNetTrainers -nnUnet -nnodes -nnu -nnunet -noah -noduplicates -noobj -np -npm -npmjs -nproc -npy -npz -nq -nreimers -nrix -ns -nsample -nsamples -nsdf -nsga -nthreads -ntrain -num -numCompleteThreads -numTest -numTraining -numactl -numba -numerics -numpy -nvcc -nvidia -nvme -nw -nyu -oQA -oaas -oc -ofa -ofi -oje -ok -ol -onboarding -oneAPI -oneDNN -oneapi -oneccl -onednn -oneshot -onlinedocs -onnx -onnxrt -onnxrtadaptor -onnxruntime -oo -oob -openaccess -openai -opencv -openmp -openslr -opensource -openssl -openvino -openvinotoolkit -opset -opsetid -optim -optimizations -optimizers -optype -optypes -optypewise -opwise -os -osJJ -ot -oup -outPut -outdir -outliers -outputfile -ov -overfeat -overfit -overfitted -pQ -pageId -palletsprojects -panoptic -paperswithcode -parallelize -parallelizes -parallelizing -param -parameterization -parametrization -params -pareto -participations -pastebin 
-patientIDs -pb -pbar -pc -pdf -pdp -pegasus -pelee -peleenet -pepy -percdamp -perchannel -perf -perftests -philschmid -phrasebank -phy -physcpubind -pixAcc -pjreddie -pkgs -pkill -pkl -pky -plm -pls -pnasnet -png -polynet -pos -postprocess -postprocessed -postprocessing -powersave -ppi -pplm -ppn -pragma -pre -prebuild -prebuilt -precisions -pred -preds -preload -preprint -preprocess -preprocessed -preprocesses -preprocessing -preprocessor -prerelease -pretrain -pretrained -pretrainedmodels -pretraining -prev -prioritizies -probs -proc -productizing -prodview -profilings -proto -protoc -protractortest -pruneofa -pth -ptq -ptr -publis -pubtables -pudae -pw -pwd -px -py -pybind -pycocotools -pyguide -pylint -pymodule -pymoo -pypi -pyproject -pytest -pythonic -pytorch -pytorchic -pyyaml -qa -qat -qconfig -qdq -qdqops -qint -qlinear -qlinearops -qnli -qoperator -qpa -qps -qq -qqp -qscheme -qsl -qstat -qsub -qsvr -qtcreator -qtype -quant -quantile -quantizable -quantization -quantizations -quantize -quantized -quantizer -quantizes -quantizing -quickstart -qweight -rAjHyXhTzz -rajpurkar -ramdisk -randn -rc -rcnn -reStructuredText -readme -readthedocs -realtime -rebase -recommonmark -recordio -recurse -regex -registerCommand -rehm -reinstall -releaser -relu -repo -repo's -repos -repo’s -requantize -resampled -resampling -rescale -resize -resized -resnest -resnet -resnetv -resnext -ressources -ret -retinaNet -retinanet -retweets -reusability -rf -rfcn -rgb -rmax -rmin -rn -rng -rnnt -ro -roberta -roc -rosanneliu -rougeL -rougeLsum -rowanz -rowspan -rq -rst -rtd -rtn -runhooks -runtime -ruserok -rusiaaman -rw -rwightman -sacremoses -sagemaker -salesforce -salti -samsum -sanh -sata -scalable -scaler -scatterFillKernel -sched -scheduler's -scikit -scm -scp -screenshots -sd -sdist -se -sed -seg -segm -segmentations -seid -senet -sentencepiece -sequenceDiagram -serializable -serverIP -serverextension -sess -settingPath -setuptools -setvars -sexualized -sgd -sgmoid -sharded -sharepoint -shouldn -showEvent -shufflenet -shufflenetv -shuoyang -sigmoid -signup -sigopt -sklearn -skx -skylion -smoothes -smoothquant -sndfile -snowly -socio -softmax -solutionslibrary -somain -sota -sox -spacings -spacy -spanbert -sparsednn -sparsification -sparsified -spearmanr -specificities -splitted -spm -spmm -spnasnet -spr -sqSiUy -sqlalchemy -sqrt -squadpy -squadv -squeezebert -squeezenet -src -sryqufw -ssd -sshleifer -sst -stackoverflow -stanford -startswith -startup -stderr -stdout -stds -stefan -stemblock -stepsize -storywriter -str -strided -struct -sts -stsb -styleguide -subexpression -subfolder -subfolders -subgraph -subgraphStyle -submodule -submodules -subprocesses -subsample -subtoken -sudo -summarization -superbench -supernet -superseeds -sut -sv -svg -swagaf -sym -symlink -symlinked -symlinks -syncedreview -synset -sys -tLoss -tanh -tatr -tb -tbe -tbody -td -te -techdecoded -tencent -tensor's -tensorBoard -tensorImageSize -tensorboard -tensorcore -tensorflow -tensorrt -teq -teraoperations -tesla -testability -textattack -tf -tfhub -tflite -tfp -tfrecord -tfrecords -tg -tgt -tgz -th -thead -thecvf -thepath -thp -thres -thrs -tiiuae -timeline -timestamps -tl -tlkh -tmp -tmpfs -toc -toctree -todo -tokenization -tokenize -tokenized -tokenizer -tokenizers -tokenizing -tol -tolist -toml -toolchain -toolchains -topRight -topk -topologies -torchaudio -torchdynamo -torchscript -torchtext -torchvision -toronto -totalizing -tp -tpe -tpu -tqdm -traceback -trainings -trainval -trainvaltest -transfo -travis -trigram -trt 
-tsl -tstandley -tsv -tunings -tuningusage -tuple -tuples -twlt -txt -uXXXXX -uber -ubuntu -ubyte -ui -uint -uk -ultraface -ultralytics -un -uncomment -uncompress -unet -unidecode -unilm -uniq -unittest -unref -unscale -unsqueeze -unstack -upenn -uploader -upscaled -upstreamed -url -username -userspace -usp -usr -util -utils -utm -valminusminival -valset -vec -veronikayurchuk -versioned -vgg -viewlet -viewpage -visualstudio -vmware -vnni -voc -voxel -voxels -vram -vscode -vxga -wWLes -waleedka -wangg -warmup -wav -wd -webcam -webite -webpage -wedam -weiaicunzai -weixin -wget -whitehat -whitelist -whl -wikipedia -wikitext -wip -wmt -wnd -wnli -woq -workdir -workflow -workflows -workspace -wrt -wsl -ww -wwm -www -xHKe -xV -xYNrZdEAnrHk -xad -xcb -xception -xchannel -xcode -xd -xeon -xgb -xgboost -xl -xlarge -xlm -xlnet -xmggbmga -xml -xnli -xpu -xsum -xvf -xvzf -xxxx -xxy -xxz -xywh -xyxy -xz -xzvf -yacs -yaml -yamls -yann -yao -yarnpkg -yizhu -yjxiong -yjyh -yolo -yolov -yoshitomo -yosinski -yottx -youtooz -youtube -yrw -za -zalandoresearch -zenodo -zfnet -zh -zhang -zhanghang -zhihu -zhongyuezhang -zhuanlan -zihangdai -zk -znoexecstack -znow -zp -zrelro -zrl -zxvf -Éric diff --git a/.azure-pipelines/scripts/codeScan/pyspelling/pyspelling.sh b/.azure-pipelines/scripts/codeScan/pyspelling/pyspelling.sh deleted file mode 100644 index d701bc14ed0..00000000000 --- a/.azure-pipelines/scripts/codeScan/pyspelling/pyspelling.sh +++ /dev/null @@ -1,25 +0,0 @@ -#!/bin/bash - -source /neural-compressor/.azure-pipelines/scripts/change_color.sh -RESET="echo -en \\E[0m \\n" # close color - -work_dir="/neural-compressor/.azure-pipelines/scripts/codeScan/pyspelling" -log_dir="$work_dir/../scanLog" -mkdir -p $log_dir - -sed -i "s|\${DICT_DIR}|$work_dir|g" $work_dir/pyspelling_conf.yaml -sed -i "s|\${REPO_DIR}|/neural-compressor|g" $work_dir/pyspelling_conf.yaml - -pyspelling -c $work_dir/pyspelling_conf.yaml >$log_dir/pyspelling.log -exit_code=$? - -$BOLD_YELLOW && echo "------------------- Current log file output start --------------------------" && $RESET -cat $log_dir/pyspelling.log -$BOLD_YELLOW && echo "------------------- Current log file output end ----------------------------" && $RESET - -if [ ${exit_code} -ne 0 ]; then - $BOLD_RED && echo "Error!! Please Click on the artifact button to download and view pyspelling error details." && $RESET - exit 1 -fi -$BOLD_PURPLE && echo "Congratulations, Pyspelling check passed!" && $LIGHT_PURPLE && echo "You can click on the artifact button to see the log details." 
&& $RESET -exit 0 diff --git a/.azure-pipelines/scripts/codeScan/pyspelling/pyspelling_conf.yaml b/.azure-pipelines/scripts/codeScan/pyspelling/pyspelling_conf.yaml deleted file mode 100644 index 408aef59810..00000000000 --- a/.azure-pipelines/scripts/codeScan/pyspelling/pyspelling_conf.yaml +++ /dev/null @@ -1,19 +0,0 @@ -matrix: - - name: Markdown - hunspell: - d: en_US.ISO8859-15 - dictionary: - wordlists: - - ${DICT_DIR}/inc_dict.txt - output: ${DICT_DIR}/inc_dict.dic - sources: - - ${REPO_DIR}/docs/source/*.md - - ${REPO_DIR}/*.md - - ${REPO_DIR}/examples/**/*.md|!${REPO_DIR}/examples/pytorch/**/huggingface_models/**/*.md|!${REPO_DIR}/examples/README.md - - ${REPO_DIR}/neural_compressor/**/*.md - - ${REPO_DIR}/neural_coder/**/*.md - - ${REPO_DIR}/neural_coder/*.md - - ${REPO_DIR}/neural_solution/*.md - - ${REPO_DIR}/neural_solution/docs/source/*.md - - ${REPO_DIR}/neural_solution/examples/**/*.md - - ${REPO_DIR}/neural_insights/*.md diff --git a/.azure-pipelines/spell-check.yml b/.azure-pipelines/spell-check.yml deleted file mode 100644 index 809eb2d14f4..00000000000 --- a/.azure-pipelines/spell-check.yml +++ /dev/null @@ -1,27 +0,0 @@ -trigger: none - -pr: - autoCancel: true - drafts: false - branches: - include: - - master - -pool: - vmImage: "ubuntu-latest" - -variables: - CODE_SCAN_LOG_PATH: ".azure-pipelines/scripts/codeScan/scanLog" - -stages: - - stage: PyspellingCodeScan - displayName: Pyspelling Code Scan - dependsOn: [] - jobs: - - job: Pyspelling - displayName: Pyspelling - steps: - - template: template/code-scan-template.yml - parameters: - codeScanFileName: "pyspelling" - uploadPath: "pyspelling.log" diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 22682e33958..405f7477b90 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -9,8 +9,7 @@ exclude: | neural_insights/test.+| neural_solution/frontend/gRPC/proto/neural_solution_pb2.py| neural_coder/extensions/.+| - neural_coder/examples/.+| - examples/.+ + neural_coder/examples/.+ )$ repos: @@ -19,6 +18,10 @@ repos: hooks: - id: end-of-file-fixer files: (.*\.(py|md|rst|yaml|yml))$ + exclude: | + (?x)^( + examples/.+ + )$ - id: check-json - id: check-yaml exclude: | @@ -37,8 +40,16 @@ repos: )$ args: [--unique] - id: requirements-txt-fixer + exclude: | + (?x)^( + examples/.+ + )$ - id: trailing-whitespace files: (.*\.(py|rst|cmake|yaml|yml))$ + exclude: | + (?x)^( + examples/.+ + )$ - repo: https://github.com/Lucas-C/pre-commit-hooks rev: v1.5.4 @@ -72,6 +83,10 @@ repos: rev: 5.12.0 hooks: - id: isort + exclude: | + (?x)^( + examples/.+ + )$ - repo: https://github.com/PyCQA/docformatter rev: v1.7.5 @@ -84,16 +99,21 @@ repos: --black, --style=google, ] + exclude: | + (?x)^( + examples/.+ + )$ - repo: https://github.com/psf/black.git - rev: 23.7.0 + rev: 23.9.1 hooks: - id: black files: (.*\.py)$ exclude: | (?x)^( neural_compressor/conf/config.py| - neural_compressor/conf/pythonic_config.py + neural_compressor/conf/pythonic_config.py| + examples/.+ )$ - repo: https://github.com/asottile/blacken-docs @@ -101,17 +121,30 @@ repos: hooks: - id: blacken-docs args: [--line-length=120, --skip-errors] - exclude: docs/source-app + exclude: | + (?x)^( + examples/.+| + docs/source-app + )$ - repo: https://github.com/codespell-project/codespell - rev: v2.2.4 + rev: v2.2.5 hooks: - id: codespell additional_dependencies: - tomli + exclude: | + (?x)^( + examples/.*(txt|patch)| + 
examples/onnxrt/nlp/huggingface_model/text_generation/llama/quantization/ptq_static/prompt.json + )$ - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.0.286 + rev: v0.0.287 hooks: - id: ruff args: [--fix, --exit-non-zero-on-fix, --no-cache] + exclude: | + (?x)^( + examples/.+ + )$ diff --git a/examples/.config/model_params_tensorflow.json b/examples/.config/model_params_tensorflow.json index 9b5fa9e4949..7624fa89603 100644 --- a/examples/.config/model_params_tensorflow.json +++ b/examples/.config/model_params_tensorflow.json @@ -966,7 +966,7 @@ "main_script": "tf_benchmark.py", "batch_size": 1 }, - "GraphSage": { + "GraphSage-oob": { "model_src_dir": "oob_models/quantization/ptq", "dataset_location": "", "input_model": "/tf_dataset/tensorflow/tf_oob_models/mlp/GraphSage/GraphSage.pb", diff --git a/examples/notebook/dynas/BERT_SST2_Supernet_NAS.ipynb b/examples/notebook/dynas/BERT_SST2_Supernet_NAS.ipynb index 857f74505d7..d479b443a23 100644 --- a/examples/notebook/dynas/BERT_SST2_Supernet_NAS.ipynb +++ b/examples/notebook/dynas/BERT_SST2_Supernet_NAS.ipynb @@ -85,7 +85,7 @@ "source": [ "# Configure NAS Algorithm\n", "\n", - "The `NASConfig` class allows us to define the appropriate paramenters for determining how the neural architecture search is performed. Currently, the following multi-objective evolutionary algorithms are supported by the `dynas` approach: \n", + "The `NASConfig` class allows us to define the appropriate parameters for determining how the neural architecture search is performed. Currently, the following multi-objective evolutionary algorithms are supported by the `dynas` approach: \n", "* `'nsga2'`\n", "* `'age'`" ] diff --git a/examples/notebook/dynas/MobileNetV3_Supernet_NAS.ipynb b/examples/notebook/dynas/MobileNetV3_Supernet_NAS.ipynb index 78b3222c6a0..edc6e09537f 100644 --- a/examples/notebook/dynas/MobileNetV3_Supernet_NAS.ipynb +++ b/examples/notebook/dynas/MobileNetV3_Supernet_NAS.ipynb @@ -83,7 +83,7 @@ "source": [ "# Configure NAS Algorithm\n", "\n", - "The `NASConfig` class allows us to define the appropriate paramenters for determining how the neural architecture search is performed. Currently, the following multi-objective evolutionary algorithms are supported by the `dynas` approach: \n", + "The `NASConfig` class allows us to define the appropriate parameters for determining how the neural architecture search is performed. Currently, the following multi-objective evolutionary algorithms are supported by the `dynas` approach: \n", "* `'nsga2'`\n", "* `'age'`" ] diff --git a/examples/notebook/dynas/ResNet50_Quantiation_Search_Supernet_NAS.ipynb b/examples/notebook/dynas/ResNet50_Quantiation_Search_Supernet_NAS.ipynb index 3ccf318df6f..9af1bc8a4cf 100644 --- a/examples/notebook/dynas/ResNet50_Quantiation_Search_Supernet_NAS.ipynb +++ b/examples/notebook/dynas/ResNet50_Quantiation_Search_Supernet_NAS.ipynb @@ -83,7 +83,7 @@ "source": [ "# Configure NAS Algorithm\n", "\n", - "The `NASConfig` class allows us to define the appropriate paramenters for determining how the neural architecture search is performed. Currently, the following multi-objective evolutionary algorithms are supported by the `dynas` approach: \n", + "The `NASConfig` class allows us to define the appropriate parameters for determining how the neural architecture search is performed. 
Currently, the following multi-objective evolutionary algorithms are supported by the `dynas` approach: \n", "* `'nsga2'`\n", "* `'age'`" ] diff --git a/examples/notebook/dynas/Transformer_LT_Supernet_NAS.ipynb b/examples/notebook/dynas/Transformer_LT_Supernet_NAS.ipynb index dc9004d207a..c9b685ab753 100644 --- a/examples/notebook/dynas/Transformer_LT_Supernet_NAS.ipynb +++ b/examples/notebook/dynas/Transformer_LT_Supernet_NAS.ipynb @@ -85,7 +85,7 @@ "source": [ "# Configure NAS Algorithm\n", "\n", - "The `NASConfig` class allows us to define the appropriate paramenters for determining how the neural architecture search is performed. Currently, the following multi-objective evolutionary algorithms are supported by the `dynas` approach: \n", + "The `NASConfig` class allows us to define the appropriate parameters for determining how the neural architecture search is performed. Currently, the following multi-objective evolutionary algorithms are supported by the `dynas` approach: \n", "* `'nsga2'`\n", "* `'age'`" ] diff --git a/examples/notebook/perf_fp32_int8_tf/run.sh b/examples/notebook/perf_fp32_int8_tf/run.sh index 0e068467e60..4dda9e0c3cc 100644 --- a/examples/notebook/perf_fp32_int8_tf/run.sh +++ b/examples/notebook/perf_fp32_int8_tf/run.sh @@ -7,7 +7,7 @@ if [ ! -d $ENV_NAME ]; then echo "Create env $ENV_NAME ..." bash set_env.sh else - echo "Already created env $ENV_NAME, skip craete env" + echo "Already created env $ENV_NAME, skipping env creation" fi source $ENV_NAME/bin/activate diff --git a/examples/notebook/pytorch/alexnet_fashion_mnist/scripts/python_src/inc_quantize_model.py b/examples/notebook/pytorch/alexnet_fashion_mnist/scripts/python_src/inc_quantize_model.py index 833bf3c7783..a81e04f9396 100755 --- a/examples/notebook/pytorch/alexnet_fashion_mnist/scripts/python_src/inc_quantize_model.py +++ b/examples/notebook/pytorch/alexnet_fashion_mnist/scripts/python_src/inc_quantize_model.py @@ -68,4 +68,3 @@ def main(): if __name__ == "__main__": main() - \ No newline at end of file diff --git a/examples/notebook/pytorch/alexnet_fashion_mnist/scripts/python_src/train_alexnet_fashion_mnist.py b/examples/notebook/pytorch/alexnet_fashion_mnist/scripts/python_src/train_alexnet_fashion_mnist.py index d96c8404d80..fb25315a506 100755 --- a/examples/notebook/pytorch/alexnet_fashion_mnist/scripts/python_src/train_alexnet_fashion_mnist.py +++ b/examples/notebook/pytorch/alexnet_fashion_mnist/scripts/python_src/train_alexnet_fashion_mnist.py @@ -19,4 +19,3 @@ def main(): if __name__ == "__main__": main() - \ No newline at end of file diff --git a/examples/notebook/tensorflow/vgg19_ibean/inc_quantize_vgg19.ipynb b/examples/notebook/tensorflow/vgg19_ibean/inc_quantize_vgg19.ipynb index a0cab932b0f..a50011d43d6 100644 --- a/examples/notebook/tensorflow/vgg19_ibean/inc_quantize_vgg19.ipynb +++ b/examples/notebook/tensorflow/vgg19_ibean/inc_quantize_vgg19.ipynb @@ -460,7 +460,7 @@ "source": [ "## Test the Performance & Accuracy\n", "\n", - "We use same script to test the perfomrance and accuracy of the FP32 and INT8 models.\n", + "We use the same script to test the performance and accuracy of the FP32 and INT8 models.\n", "\n", "Use 4 CPU cores to test process.\n" ] diff --git a/examples/onnxrt/nlp/huggingface_model/language_modeling/quantization/ptq_dynamic/main.py b/examples/onnxrt/nlp/huggingface_model/language_modeling/quantization/ptq_dynamic/main.py index f815dbb0385..345f6fac183 100644 --- a/examples/onnxrt/nlp/huggingface_model/language_modeling/quantization/ptq_dynamic/main.py +++ 
b/examples/onnxrt/nlp/huggingface_model/language_modeling/quantization/ptq_dynamic/main.py @@ -61,7 +61,7 @@ def __init__(self, tokenizer, args, file_path='train', block_size=1024): for i in range(0, len(tokenized_text)-block_size+1, block_size): # Truncate in block of block_size self.examples.append(tokenizer.build_inputs_with_special_tokens(tokenized_text[i:i+block_size])) - # Note that we are loosing the last truncated example here for the sake of simplicity (no padding) + # Note that we are losing the last truncated example here for the sake of simplicity (no padding) # If your dataset is small, first you should loook for a bigger one :-) and second you # can change this behavior by adding (model specific) padding. @@ -166,7 +166,7 @@ def main(): parser.add_argument("--model_name_or_path", type=str, help="The model checkpoint for weights initialization.") parser.add_argument("--cache_dir", default="", type=str, - help="Optional directory to store the pre-trained models downloaded from s3 (instread of the default one)") + help="Optional directory to store the pre-trained models downloaded from s3 (instead of the default one)") parser.add_argument("--block_size", default=1024, type=int, help="Optional input sequence length after tokenization." "The training dataset will be truncated in block of this size for training." diff --git a/examples/onnxrt/nlp/huggingface_model/language_modeling/quantization/ptq_static/main.py b/examples/onnxrt/nlp/huggingface_model/language_modeling/quantization/ptq_static/main.py index 3c45336c9f8..8229f34894d 100644 --- a/examples/onnxrt/nlp/huggingface_model/language_modeling/quantization/ptq_static/main.py +++ b/examples/onnxrt/nlp/huggingface_model/language_modeling/quantization/ptq_static/main.py @@ -61,7 +61,7 @@ def __init__(self, tokenizer, args, file_path='train', block_size=1024): for i in range(0, len(tokenized_text)-block_size+1, block_size): # Truncate in block of block_size self.examples.append(tokenizer.build_inputs_with_special_tokens(tokenized_text[i:i+block_size])) - # Note that we are loosing the last truncated example here for the sake of simplicity (no padding) + # Note that we are losing the last truncated example here for the sake of simplicity (no padding) # If your dataset is small, first you should loook for a bigger one :-) and second you # can change this behavior by adding (model specific) padding. @@ -166,7 +166,7 @@ def main(): parser.add_argument("--model_name_or_path", type=str, help="The model checkpoint for weights initialization.") parser.add_argument("--cache_dir", default="", type=str, - help="Optional directory to store the pre-trained models downloaded from s3 (instread of the default one)") + help="Optional directory to store the pre-trained models downloaded from s3 (instead of the default one)") parser.add_argument("--block_size", default=1024, type=int, help="Optional input sequence length after tokenization." "The training dataset will be truncated in block of this size for training." 
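The two hunks above touch the same block-truncation logic in the ptq_dynamic and ptq_static language-modeling examples: the tokenized corpus is cut into fixed `block_size` chunks, and any trailing partial chunk is dropped rather than padded. A minimal sketch of that chunking, assuming a HuggingFace-style `tokenizer` and an illustrative helper name (`make_blocks` is not part of the example code):

```python
def make_blocks(tokenizer, text, block_size=1024):
    """Cut tokenized text into full block_size chunks (illustrative helper)."""
    ids = tokenizer.convert_tokens_to_ids(tokenizer.tokenize(text))
    examples = []
    # Step in strides of block_size; the final partial block is dropped
    # (no padding), which is the trade-off the corrected comment describes.
    for i in range(0, len(ids) - block_size + 1, block_size):
        examples.append(tokenizer.build_inputs_with_special_tokens(ids[i:i + block_size]))
    return examples
```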
diff --git a/examples/onnxrt/nlp/huggingface_model/text_classification/mix_precision/main.py b/examples/onnxrt/nlp/huggingface_model/text_classification/mix_precision/main.py index fa5bd52f578..ed33d421833 100644 --- a/examples/onnxrt/nlp/huggingface_model/text_classification/mix_precision/main.py +++ b/examples/onnxrt/nlp/huggingface_model/text_classification/mix_precision/main.py @@ -370,7 +370,7 @@ def result(self): onnx.save(new, args.output_model) except: logging.warning("Fail to upgrade opset_import to > 15, " - "please upgrate it manually to run with bf16 data type") + "please upgrade it manually to run with bf16 data type") else: converted_model.save(args.output_model) diff --git a/examples/onnxrt/nlp/huggingface_model/text_generation/llama/quantization/ptq_static/main.py b/examples/onnxrt/nlp/huggingface_model/text_generation/llama/quantization/ptq_static/main.py index c2cff5bb472..1cf19b1873c 100644 --- a/examples/onnxrt/nlp/huggingface_model/text_generation/llama/quantization/ptq_static/main.py +++ b/examples/onnxrt/nlp/huggingface_model/text_generation/llama/quantization/ptq_static/main.py @@ -149,7 +149,7 @@ def benchmark(model): if input_tokens in prompt_pool: prompt = prompt_pool[input_tokens] else: - raise SystemExit('[ERROR] Plese use --prompt if want to use custom input.') + raise SystemExit('[ERROR] Please use --prompt if you want to use a custom input.') input_size = tokenizer(prompt, return_tensors="pt").input_ids.size(dim=1) print("---- Prompt size:", input_size) diff --git a/examples/onnxrt/nlp/onnx_model_zoo/bert-squad/quantization/ptq_dynamic/tokenization.py b/examples/onnxrt/nlp/onnx_model_zoo/bert-squad/quantization/ptq_dynamic/tokenization.py index 52c92adb81f..5a1ae735ce8 100644 --- a/examples/onnxrt/nlp/onnx_model_zoo/bert-squad/quantization/ptq_dynamic/tokenization.py +++ b/examples/onnxrt/nlp/onnx_model_zoo/bert-squad/quantization/ptq_dynamic/tokenization.py @@ -361,7 +361,7 @@ def tokenize(self, text): def _is_whitespace(char): """Checks whether `chars` is a whitespace character.""" - # \t, \n, and \r are technically contorl characters but we treat them + # \t, \n, and \r are technically control characters but we treat them # as whitespace since they are generally considered as such. if char == " " or char == "\t" or char == "\n" or char == "\r": return True diff --git a/examples/onnxrt/nlp/onnx_model_zoo/gpt2/quantization/ptq_dynamic/gpt2.py b/examples/onnxrt/nlp/onnx_model_zoo/gpt2/quantization/ptq_dynamic/gpt2.py index 84b40644a46..a67042a600d 100644 --- a/examples/onnxrt/nlp/onnx_model_zoo/gpt2/quantization/ptq_dynamic/gpt2.py +++ b/examples/onnxrt/nlp/onnx_model_zoo/gpt2/quantization/ptq_dynamic/gpt2.py @@ -74,7 +74,7 @@ def __init__(self, tokenizer, args, file_path='train', block_size=1024): for i in range(0, len(tokenized_text)-block_size+1, block_size): # Truncate in block of block_size self.examples.append(tokenizer.build_inputs_with_special_tokens(tokenized_text[i:i+block_size])) - # Note that we are loosing the last truncated example here for the sake of simplicity (no padding) + # Note that we are losing the last truncated example here for the sake of simplicity (no padding) # If your dataset is small, first you should loook for a bigger one :-) and second you # can change this behavior by adding (model specific) padding.
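The text_classification mix_precision hunk above warns that running with bf16 needs `opset_import` above 15 and asks the user to upgrade manually if the automatic attempt fails. A minimal sketch of such a manual upgrade with the standard `onnx` version converter; the file names are hypothetical:

```python
import onnx
from onnx import version_converter

# Hypothetical paths, for illustration only.
model = onnx.load("model.onnx")
# Convert the default-domain opset to 16 (i.e. > 15, as the warning requires).
converted = version_converter.convert_version(model, 16)
onnx.save(converted, "model_opset16.onnx")
```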
@@ -184,7 +184,7 @@ def main(): parser.add_argument("--model_name_or_path", type=str, help="The model checkpoint for weights initialization.") parser.add_argument("--cache_dir", default="", type=str, - help="Optional directory to store the pre-trained models downloaded from s3 (instread of the default one)") + help="Optional directory to store the pre-trained models downloaded from s3 (instead of the default one)") parser.add_argument("--block_size", default=1024, type=int, help="Optional input sequence length after tokenization." "The training dataset will be truncated in block of this size for training." diff --git a/examples/onnxrt/nlp/onnx_model_zoo/mobilebert/quantization/ptq_dynamic/tokenization.py b/examples/onnxrt/nlp/onnx_model_zoo/mobilebert/quantization/ptq_dynamic/tokenization.py index 52c92adb81f..5a1ae735ce8 100644 --- a/examples/onnxrt/nlp/onnx_model_zoo/mobilebert/quantization/ptq_dynamic/tokenization.py +++ b/examples/onnxrt/nlp/onnx_model_zoo/mobilebert/quantization/ptq_dynamic/tokenization.py @@ -361,7 +361,7 @@ def tokenize(self, text): def _is_whitespace(char): """Checks whether `chars` is a whitespace character.""" - # \t, \n, and \r are technically contorl characters but we treat them + # \t, \n, and \r are technically control characters but we treat them # as whitespace since they are generally considered as such. if char == " " or char == "\t" or char == "\n" or char == "\r": return True diff --git a/examples/onnxrt/nlp/onnx_model_zoo/mobilebert/quantization/ptq_static/tokenization.py b/examples/onnxrt/nlp/onnx_model_zoo/mobilebert/quantization/ptq_static/tokenization.py index 52c92adb81f..5a1ae735ce8 100644 --- a/examples/onnxrt/nlp/onnx_model_zoo/mobilebert/quantization/ptq_static/tokenization.py +++ b/examples/onnxrt/nlp/onnx_model_zoo/mobilebert/quantization/ptq_static/tokenization.py @@ -361,7 +361,7 @@ def tokenize(self, text): def _is_whitespace(char): """Checks whether `chars` is a whitespace character.""" - # \t, \n, and \r are technically contorl characters but we treat them + # \t, \n, and \r are technically control characters but we treat them # as whitespace since they are generally considered as such. 
if char == " " or char == "\t" or char == "\n" or char == "\r": return True diff --git a/examples/onnxrt/object_detection/onnx_model_zoo/yolov3/quantization/ptq_static/main.py b/examples/onnxrt/object_detection/onnx_model_zoo/yolov3/quantization/ptq_static/main.py index 71670e6fc87..a99e7fc8474 100644 --- a/examples/onnxrt/object_detection/onnx_model_zoo/yolov3/quantization/ptq_static/main.py +++ b/examples/onnxrt/object_detection/onnx_model_zoo/yolov3/quantization/ptq_static/main.py @@ -400,4 +400,3 @@ def eval_func(model): 'pre_post_process_quantization': False}) q_model = quantization.fit(model, config, calib_dataloader=dataloader, eval_func=eval_func) q_model.save(args.output_model) - \ No newline at end of file diff --git a/examples/onnxrt/object_detection/onnx_model_zoo/yolov4/quantization/ptq_static/main.py b/examples/onnxrt/object_detection/onnx_model_zoo/yolov4/quantization/ptq_static/main.py index 8c1d941cdc3..8839afa6758 100644 --- a/examples/onnxrt/object_detection/onnx_model_zoo/yolov4/quantization/ptq_static/main.py +++ b/examples/onnxrt/object_detection/onnx_model_zoo/yolov4/quantization/ptq_static/main.py @@ -216,7 +216,7 @@ def postprocess_bbbox(pred_bbox, ANCHORS, STRIDES, XYSCALE=[1,1,1]): return pred_bbox def postprocess_boxes(pred_bbox, org_img_shape, input_size, score_threshold): - '''remove boundary boxs with a low detection probability''' + '''remove boundary boxes with a low detection probability''' valid_scale=[0, np.inf] pred_bbox = np.array(pred_bbox) diff --git a/examples/pytorch/image_recognition/3d-unet/quantization/ptq/fx/preprocess.py b/examples/pytorch/image_recognition/3d-unet/quantization/ptq/fx/preprocess.py index 758d92488ea..f86bf9e18c5 100644 --- a/examples/pytorch/image_recognition/3d-unet/quantization/ptq/fx/preprocess.py +++ b/examples/pytorch/image_recognition/3d-unet/quantization/ptq/fx/preprocess.py @@ -108,7 +108,7 @@ def main(): # Preprocess images, returns filenames list # This runs in multiprocess - print("Acually preprocessing data...") + print("Actually preprocessing data...") preprocessed_files = preprocess_MLPerf(model_dir, checkpoint_name, fold, fp16, list_of_lists, validation_files, preprocessed_data_dir, num_threads_preprocessing) diff --git a/examples/pytorch/nlp/blendcnn/distillation/eager/data.py b/examples/pytorch/nlp/blendcnn/distillation/eager/data.py index 01afa024adc..2c199b18ab6 100644 --- a/examples/pytorch/nlp/blendcnn/distillation/eager/data.py +++ b/examples/pytorch/nlp/blendcnn/distillation/eager/data.py @@ -113,7 +113,7 @@ def get_instances(self, text_file): ### Pipeline Classes for preprocessing ### class RemoveSymbols(Pipeline): - """ Remove unnecesary symbols """ + """ Remove unnecessary symbols """ def __init__(self, symbols): super().__init__() self.symbols = symbols diff --git a/examples/pytorch/nlp/blendcnn/distillation/eager/tokenization.py b/examples/pytorch/nlp/blendcnn/distillation/eager/tokenization.py index fd33b695c5c..10aa822b68a 100644 --- a/examples/pytorch/nlp/blendcnn/distillation/eager/tokenization.py +++ b/examples/pytorch/nlp/blendcnn/distillation/eager/tokenization.py @@ -90,7 +90,7 @@ def convert_tokens_to_ids(vocab, tokens): def whitespace_tokenize(text): - """Runs basic whitespace cleaning and splitting on a peice of text.""" + """Runs basic whitespace cleaning and splitting on a piece of text.""" text = text.strip() if not text: return [] @@ -257,7 +257,7 @@ def tokenize(self, text): def _is_whitespace(char): """Checks whether `chars` is a whitespace character.""" - # \t, \n, and \r are 
technically contorl characters but we treat them + # \t, \n, and \r are technically control characters but we treat them # as whitespace since they are generally considered as such. if char == " " or char == "\t" or char == "\n" or char == "\r": return True diff --git a/examples/pytorch/nlp/blendcnn/distillation/eager/utils.py b/examples/pytorch/nlp/blendcnn/distillation/eager/utils.py index 2fce8390482..2d1745efcd7 100644 --- a/examples/pytorch/nlp/blendcnn/distillation/eager/utils.py +++ b/examples/pytorch/nlp/blendcnn/distillation/eager/utils.py @@ -65,18 +65,18 @@ def find_sublist(haystack, needle): def get_logger(name, log_path): "get logger" logger = logging.getLogger(name) - fomatter = logging.Formatter( + formatter = logging.Formatter( '[ %(levelname)s|%(filename)s:%(lineno)s] %(asctime)s > %(message)s') if not os.path.isfile(log_path): f = open(log_path, "w+") fileHandler = logging.FileHandler(log_path) - fileHandler.setFormatter(fomatter) + fileHandler.setFormatter(formatter) logger.addHandler(fileHandler) #streamHandler = logging.StreamHandler() - #streamHandler.setFormatter(fomatter) + #streamHandler.setFormatter(formatter) #logger.addHandler(streamHandler) logger.setLevel(logging.DEBUG) diff --git a/examples/pytorch/nlp/blendcnn/quantization/ptq/ipex/data.py b/examples/pytorch/nlp/blendcnn/quantization/ptq/ipex/data.py index 01afa024adc..2c199b18ab6 100644 --- a/examples/pytorch/nlp/blendcnn/quantization/ptq/ipex/data.py +++ b/examples/pytorch/nlp/blendcnn/quantization/ptq/ipex/data.py @@ -113,7 +113,7 @@ def get_instances(self, text_file): ### Pipeline Classes for preprocessing ### class RemoveSymbols(Pipeline): - """ Remove unnecesary symbols """ + """ Remove unnecessary symbols """ def __init__(self, symbols): super().__init__() self.symbols = symbols diff --git a/examples/pytorch/nlp/blendcnn/quantization/ptq/ipex/tokenization.py b/examples/pytorch/nlp/blendcnn/quantization/ptq/ipex/tokenization.py index fd33b695c5c..10aa822b68a 100644 --- a/examples/pytorch/nlp/blendcnn/quantization/ptq/ipex/tokenization.py +++ b/examples/pytorch/nlp/blendcnn/quantization/ptq/ipex/tokenization.py @@ -90,7 +90,7 @@ def convert_tokens_to_ids(vocab, tokens): def whitespace_tokenize(text): - """Runs basic whitespace cleaning and splitting on a peice of text.""" + """Runs basic whitespace cleaning and splitting on a piece of text.""" text = text.strip() if not text: return [] @@ -257,7 +257,7 @@ def tokenize(self, text): def _is_whitespace(char): """Checks whether `chars` is a whitespace character.""" - # \t, \n, and \r are technically contorl characters but we treat them + # \t, \n, and \r are technically control characters but we treat them # as whitespace since they are generally considered as such. 
if char == " " or char == "\t" or char == "\n" or char == "\r": return True diff --git a/examples/pytorch/nlp/blendcnn/quantization/ptq/ipex/utils.py b/examples/pytorch/nlp/blendcnn/quantization/ptq/ipex/utils.py index 230125cca40..bb3edad60cb 100644 --- a/examples/pytorch/nlp/blendcnn/quantization/ptq/ipex/utils.py +++ b/examples/pytorch/nlp/blendcnn/quantization/ptq/ipex/utils.py @@ -79,18 +79,18 @@ def find_sublist(haystack, needle): def get_logger(name, log_path): "get logger" logger = logging.getLogger(name) - fomatter = logging.Formatter( + formatter = logging.Formatter( '[ %(levelname)s|%(filename)s:%(lineno)s] %(asctime)s > %(message)s') if not os.path.isfile(log_path): f = open(log_path, "w+") fileHandler = logging.FileHandler(log_path) - fileHandler.setFormatter(fomatter) + fileHandler.setFormatter(formatter) logger.addHandler(fileHandler) #streamHandler = logging.StreamHandler() - #streamHandler.setFormatter(fomatter) + #streamHandler.setFormatter(formatter) #logger.addHandler(streamHandler) logger.setLevel(logging.DEBUG) diff --git a/examples/pytorch/nlp/huggingface_models/language-modeling/quantization/ptq_weight_only/run-gptq-llm.py b/examples/pytorch/nlp/huggingface_models/language-modeling/quantization/ptq_weight_only/run-gptq-llm.py index ca0ea0c84ea..f40748c99ba 100644 --- a/examples/pytorch/nlp/huggingface_models/language-modeling/quantization/ptq_weight_only/run-gptq-llm.py +++ b/examples/pytorch/nlp/huggingface_models/language-modeling/quantization/ptq_weight_only/run-gptq-llm.py @@ -276,7 +276,7 @@ def skip(*args, **kwargs): # ) # q_model = quantization.fit(model, conf, calib_dataloader=calib_dataloader,) - # method 2: directly use INC build-in function, for some models like falcon, please use this function + # method 2: directly use INC built-in function, for some models like falcon, please use this function conf = { ".*":{ 'wbits': args.wbits, # 1-8 bits diff --git a/examples/pytorch/nlp/huggingface_models/language-modeling/quantization/ptq_weight_only/run_gptj_mlperf_int4.py b/examples/pytorch/nlp/huggingface_models/language-modeling/quantization/ptq_weight_only/run_gptj_mlperf_int4.py index 89490674613..6886c9ba7e4 100644 --- a/examples/pytorch/nlp/huggingface_models/language-modeling/quantization/ptq_weight_only/run_gptj_mlperf_int4.py +++ b/examples/pytorch/nlp/huggingface_models/language-modeling/quantization/ptq_weight_only/run_gptj_mlperf_int4.py @@ -31,7 +31,7 @@ def skip(*args, **kwargs): torch.nn.init.uniform_ = skip torch.nn.init.normal_ = skip from transformers import GPTJForCausalLM, AutoModelForCausalLM - model = GPTJForCausalLM.from_pretrained(model) # load the model with fp32 percision + model = GPTJForCausalLM.from_pretrained(model) # load the model with fp32 precision #model = AutoModelForCausalLM.from_pretrained(model, torch_dtype=torch.float16) return model @@ -318,7 +318,7 @@ def forward(self, *inp, **kwargs): }, }, ) - import pdb;pdb.set_trace() + q_model = quantization.fit(model, conf, calib_dataloader=dataloader,) q_model.save("./gptj-gptq-gs128-calib128-calibration-fp16/") diff --git a/examples/pytorch/nlp/huggingface_models/question-answering/distillation/eager/run_qa_no_trainer_distillation.py b/examples/pytorch/nlp/huggingface_models/question-answering/distillation/eager/run_qa_no_trainer_distillation.py index bdb085e8b2d..d05c2638fe5 100644 --- a/examples/pytorch/nlp/huggingface_models/question-answering/distillation/eager/run_qa_no_trainer_distillation.py +++ 
b/examples/pytorch/nlp/huggingface_models/question-answering/distillation/eager/run_qa_no_trainer_distillation.py @@ -535,7 +535,7 @@ def main(): model = AutoModelForQuestionAnswering.from_config(config) # Preprocessing the datasets. - # Preprocessing is slighlty different for training and evaluation. + # Preprocessing is slightly different for training and evaluation. column_names = raw_datasets["train"].column_names @@ -632,7 +632,7 @@ def prepare_train_features(examples, tokenizer=tokenizer): raise ValueError("--do_train requires a train dataset") train_examples = raw_datasets["train"] if args.max_train_samples is not None: - # We will select sample from whole data if agument is specified + # We will select sample from whole data if argument is specified train_examples = train_examples.select(range(args.max_train_samples)) # Create train feature from dataset train_dataset = train_examples.map( diff --git a/examples/pytorch/nlp/huggingface_models/question-answering/mixed_precision/ipex/run_qa.py b/examples/pytorch/nlp/huggingface_models/question-answering/mixed_precision/ipex/run_qa.py index 61b6db3bb97..76544dbc46e 100644 --- a/examples/pytorch/nlp/huggingface_models/question-answering/mixed_precision/ipex/run_qa.py +++ b/examples/pytorch/nlp/huggingface_models/question-answering/mixed_precision/ipex/run_qa.py @@ -343,7 +343,7 @@ def main(): ) # Preprocessing the datasets. - # Preprocessing is slighlty different for training and evaluation. + # Preprocessing is slightly different for training and evaluation. if training_args.do_train: column_names = raw_datasets["train"].column_names elif training_args.do_eval: diff --git a/examples/pytorch/nlp/huggingface_models/question-answering/model_slim/README.md b/examples/pytorch/nlp/huggingface_models/question-answering/model_slim/README.md index b86ba8ecb31..796fa6e02fa 100644 --- a/examples/pytorch/nlp/huggingface_models/question-answering/model_slim/README.md +++ b/examples/pytorch/nlp/huggingface_models/question-answering/model_slim/README.md @@ -8,7 +8,7 @@ To be specific, if a model has two consecutive linear layers, which is common in This leads to no change for model's accuracy, but can obtain a significant acceleration for model's inference, because the transformer models' FFN parts take nearly 50% of entire computing overhead. Thus, compressing weights in FFN parts is really useful. ## Multi-head Pruning for Self-Attention Layers -Self attention modules are common in all Transformer-based models. These models use multi-head attention (also known as MHA) to enhance their abilities of linking contextual information. Transformer-based models usually stack a sequence of MHA modules, and this makes MHA takes a noticable storage and memory bandwith. As an optimization method, head pruning removes attention heads which make minor contribution to model's contextual analysis. This method does not lead to much accuracy loss, but provides us with much opportunity for model acceleration. +Self-attention modules are common in all Transformer-based models. These models use multi-head attention (also known as MHA) to enhance their ability to link contextual information. Transformer-based models usually stack a sequence of MHA modules, which makes MHA take up noticeable storage and memory bandwidth. As an optimization method, head pruning removes attention heads that make a minor contribution to the model's contextual analysis. This method does not lead to much accuracy loss, but provides ample opportunity for model acceleration.
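As a purely illustrative aside on the head-pruning idea described above: HuggingFace `transformers` models expose a generic `prune_heads` hook that removes individual attention heads. This is not the INC `model_slim` API that the next section of this README introduces, and the checkpoint and head indices below are arbitrary:

```python
from transformers import AutoModelForQuestionAnswering

# Arbitrary checkpoint, chosen only to illustrate the mechanism.
model = AutoModelForQuestionAnswering.from_pretrained("bert-base-uncased")

# Remove heads 0 and 2 of layer 0 and head 1 of layer 5; the attention
# projection matrices are re-packed, so the pruned model is smaller and faster.
model.prune_heads({0: [0, 2], 5: [1]})
```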
## API for Consecutive Linear Layers and Multi-head Attention Slim We provide API functions for you to complete the process above and slim your transformer models easily. Here is how to call our API functions. Simply provide a target sparsity value to our API function **parse_auto_slim_config** and it can generate the [pruning_configs](https://github.com/intel/neural-compressor/tree/master/neural_compressor/compression/pruner#get-started-with-pruning-api) used by our pruning API. This process is fully automatic and target linear layers will be included without manual setting. After the pruning process finishes, use the API function **model_slim** to slim the model. diff --git a/examples/pytorch/nlp/huggingface_models/question-answering/model_slim/run_qa_no_trainer_auto_slim.py b/examples/pytorch/nlp/huggingface_models/question-answering/model_slim/run_qa_no_trainer_auto_slim.py index d245fb24df9..5fd974fb199 100644 --- a/examples/pytorch/nlp/huggingface_models/question-answering/model_slim/run_qa_no_trainer_auto_slim.py +++ b/examples/pytorch/nlp/huggingface_models/question-answering/model_slim/run_qa_no_trainer_auto_slim.py @@ -569,7 +569,7 @@ def main(): model = AutoModelForQuestionAnswering.from_config(config) # Preprocessing the datasets. - # Preprocessing is slighlty different for training and evaluation. + # Preprocessing is slightly different for training and evaluation. column_names = raw_datasets["train"].column_names @@ -670,7 +670,7 @@ def prepare_train_features(examples): raise ValueError("--do_train requires a train dataset") train_dataset = raw_datasets["train"] if args.max_train_samples is not None: - # We will select sample from whole data if agument is specified + # We will select samples from the whole data if the argument is specified train_dataset = train_dataset.select(range(args.max_train_samples)) # Create train feature from dataset diff --git a/examples/pytorch/nlp/huggingface_models/question-answering/optimization_pipeline/prune_once_for_all/fx/README.md b/examples/pytorch/nlp/huggingface_models/question-answering/optimization_pipeline/prune_once_for_all/fx/README.md index bd47d5ce63f..1f95b57b740 100644 --- a/examples/pytorch/nlp/huggingface_models/question-answering/optimization_pipeline/prune_once_for_all/fx/README.md +++ b/examples/pytorch/nlp/huggingface_models/question-answering/optimization_pipeline/prune_once_for_all/fx/README.md @@ -50,7 +50,7 @@ We also support Distributed Data Parallel training on single node and multi-node settings.
For example, the bash command of stage 1 for the SQuAD task will look like the following, where *``* is the address of the master node (it won't be necessary for the single node case), *``* is the desired number of processes to use in the current node (for a node with GPUs, usually set to the number of GPUs in this node; for a node without GPUs that uses the CPU for training, it's recommended to set it to 1), *``* is the number of nodes to use, and *``* is the rank of the current node, where the rank starts from 0 and goes up to *``*`-1`.
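As background for the command below: each per-node argument passed to the launcher surfaces inside every started worker as an environment variable, which `torch.distributed` then consumes. A minimal sketch of what one launched worker sees (illustrative only; `gloo` is the CPU-capable backend that the `--no_cuda` path relies on):

```python
import os
import torch.distributed as dist

# Inside a process started by torchrun, the launcher has already exported
# RANK, LOCAL_RANK, WORLD_SIZE, MASTER_ADDR and MASTER_PORT for us.
dist.init_process_group(backend="gloo")  # gloo supports CPU-only training

rank = int(os.environ["RANK"])              # global rank: 0 .. world_size-1
local_rank = int(os.environ["LOCAL_RANK"])  # rank of this process on its node
world_size = int(os.environ["WORLD_SIZE"])  # nproc_per_node * nnodes
print(f"worker {rank}/{world_size} (local rank {local_rank}) is up")
```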
-Also please note that to use CPU for training in each node with multi nodes settings, argument `--no_cuda` is mandatory. In multi nodes setting, following command needs to be lanuched in each node, and all the commands should be the same except for *``*, which should be integer from 0 to *``*`-1` assigned to each node. +Also please note that to use CPU for training in each node under the multi-node setting, the argument `--no_cuda` is mandatory. In the multi-node setting, the following command needs to be launched in each node, and all the commands should be the same except for *``*, which should be an integer from 0 to *``*`-1` assigned to each node. ```bash torchrun --master_addr= --nproc_per_node= --nnodes= --node_rank= \ diff --git a/examples/pytorch/nlp/huggingface_models/question-answering/optimization_pipeline/prune_once_for_all/fx/run_qa_no_trainer_pruneOFA.py b/examples/pytorch/nlp/huggingface_models/question-answering/optimization_pipeline/prune_once_for_all/fx/run_qa_no_trainer_pruneOFA.py index a15626f1b58..188c5a65976 100644 --- a/examples/pytorch/nlp/huggingface_models/question-answering/optimization_pipeline/prune_once_for_all/fx/run_qa_no_trainer_pruneOFA.py +++ b/examples/pytorch/nlp/huggingface_models/question-answering/optimization_pipeline/prune_once_for_all/fx/run_qa_no_trainer_pruneOFA.py @@ -544,7 +544,7 @@ def main(): 'please provide .pt file'.format(args.resume)) # Preprocessing the datasets. - # Preprocessing is slighlty different for training and evaluation. + # Preprocessing is slightly different for training and evaluation. column_names = raw_datasets["train"].column_names @@ -641,7 +641,7 @@ def prepare_train_features(examples): raise ValueError("--do_train requires a train dataset") train_dataset = raw_datasets["train"] if args.max_train_samples is not None: - # We will select sample from whole data if agument is specified + # We will select samples from the whole data if the argument is specified train_dataset = train_dataset.select(range(args.max_train_samples)) # Create train feature from dataset train_dataset = train_dataset.map( @@ -707,7 +707,7 @@ def prepare_validation_features(examples): eval_examples = eval_examples.select(range(args.max_eval_samples)) # fx model must take input with predefined shape, evaluation of QA model - # need lengthes of dataset and dataloader be the same, + # need lengths of dataset and dataloader to be the same, # so here to make length of eval_examples to multiples of batch_size. eval_examples = eval_examples.select(range((len(eval_examples) // args.batch_size) * args.batch_size)) @@ -725,7 +725,7 @@ def prepare_validation_features(examples): # During Feature creation dataset samples might increase, we will select required samples again eval_dataset = eval_dataset.select(range(args.max_eval_samples)) # fx model must take input with predefined shape, evaluation of QA model - # need lengthes of dataset and dataloader be the same, + # need lengths of dataset and dataloader to be the same, # so here to make length of eval_dataset to multiples of batch_size. eval_dataset = eval_dataset.select(range((len(eval_dataset) // args.batch_size) * args.batch_size)) @@ -738,7 +738,7 @@ def prepare_validation_features(examples): predict_examples = predict_examples.select(range(args.max_predict_samples)) # fx model must take input with predefined shape, evaluation of QA model - # need lengthes of dataset and dataloader be the same, + # need lengths of dataset and dataloader to be the same, # so here to make length of predict_examples to multiples of batch_size.
predict_examples = predict_examples.select(range((len(predict_examples) // args.batch_size) * args.batch_size)) @@ -756,7 +756,7 @@ def prepare_validation_features(examples): predict_dataset = predict_dataset.select(range(args.max_predict_samples)) # fx model must take input with predefined shape, evaluation of QA model - # need lengthes of dataset and dataloader be the same, + # need lengths of dataset and dataloader to be the same, # so here to make length of predict_dataset to multiples of batch_size. predict_dataset = predict_dataset.select(range((len(predict_dataset) // args.batch_size) * args.batch_size)) diff --git a/examples/pytorch/nlp/huggingface_models/question-answering/pruning/eager/run_qa_no_trainer.py b/examples/pytorch/nlp/huggingface_models/question-answering/pruning/eager/run_qa_no_trainer.py index d6fca2b3aa5..ca81085201d 100644 --- a/examples/pytorch/nlp/huggingface_models/question-answering/pruning/eager/run_qa_no_trainer.py +++ b/examples/pytorch/nlp/huggingface_models/question-answering/pruning/eager/run_qa_no_trainer.py @@ -556,7 +556,7 @@ def main(): model = AutoModelForQuestionAnswering.from_config(config) # Preprocessing the datasets. - # Preprocessing is slighlty different for training and evaluation. + # Preprocessing is slightly different for training and evaluation. column_names = raw_datasets["train"].column_names @@ -657,7 +657,7 @@ def prepare_train_features(examples): raise ValueError("--do_train requires a train dataset") train_dataset = raw_datasets["train"] if args.max_train_samples is not None: - # We will select sample from whole data if agument is specified + # We will select samples from the whole data if the argument is specified train_dataset = train_dataset.select(range(args.max_train_samples)) # Create train feature from dataset diff --git a/examples/pytorch/nlp/huggingface_models/question-answering/pruning/eager/run_qa_no_trainer_block.py b/examples/pytorch/nlp/huggingface_models/question-answering/pruning/eager/run_qa_no_trainer_block.py index 0e93aa7d1f3..96665051273 100644 --- a/examples/pytorch/nlp/huggingface_models/question-answering/pruning/eager/run_qa_no_trainer_block.py +++ b/examples/pytorch/nlp/huggingface_models/question-answering/pruning/eager/run_qa_no_trainer_block.py @@ -557,7 +557,7 @@ def main(): model = AutoModelForQuestionAnswering.from_config(config) # Preprocessing the datasets. - # Preprocessing is slighlty different for training and evaluation. + # Preprocessing is slightly different for training and evaluation. column_names = raw_datasets["train"].column_names @@ -658,7 +658,7 @@ def prepare_train_features(examples): raise ValueError("--do_train requires a train dataset") train_dataset = raw_datasets["train"] if args.max_train_samples is not None: - # We will select sample from whole data if agument is specified + # We will select samples from the whole data if the argument is specified train_dataset = train_dataset.select(range(args.max_train_samples)) # Create train feature from dataset diff --git a/examples/pytorch/nlp/huggingface_models/question-answering/quantization/ptq_static/fx/run_qa.py b/examples/pytorch/nlp/huggingface_models/question-answering/quantization/ptq_static/fx/run_qa.py index 37656ba22cd..7ef1da864e8 100644 --- a/examples/pytorch/nlp/huggingface_models/question-answering/quantization/ptq_static/fx/run_qa.py +++ b/examples/pytorch/nlp/huggingface_models/question-answering/quantization/ptq_static/fx/run_qa.py @@ -338,7 +338,7 @@ def main(): ) # Preprocessing the datasets.
- # Preprocessing is slighlty different for training and evaluation. + # Preprocessing is slightly different for training and evaluation. if training_args.do_train: column_names = raw_datasets["train"].column_names elif training_args.do_eval: diff --git a/examples/pytorch/nlp/huggingface_models/question-answering/quantization/ptq_static/ipex/run_qa.py b/examples/pytorch/nlp/huggingface_models/question-answering/quantization/ptq_static/ipex/run_qa.py index 1254acdcdbf..87355c59a99 100644 --- a/examples/pytorch/nlp/huggingface_models/question-answering/quantization/ptq_static/ipex/run_qa.py +++ b/examples/pytorch/nlp/huggingface_models/question-answering/quantization/ptq_static/ipex/run_qa.py @@ -344,7 +344,7 @@ def main(): ) # Preprocessing the datasets. - # Preprocessing is slighlty different for training and evaluation. + # Preprocessing is slightly different for training and evaluation. if training_args.do_train: column_names = raw_datasets["train"].column_names elif training_args.do_eval: diff --git a/examples/pytorch/nlp/huggingface_models/text-classification/distillation/eager/run_glue_no_trainer_distillation.py b/examples/pytorch/nlp/huggingface_models/text-classification/distillation/eager/run_glue_no_trainer_distillation.py index dbabae29f97..b478d3fc3c4 100644 --- a/examples/pytorch/nlp/huggingface_models/text-classification/distillation/eager/run_glue_no_trainer_distillation.py +++ b/examples/pytorch/nlp/huggingface_models/text-classification/distillation/eager/run_glue_no_trainer_distillation.py @@ -467,7 +467,7 @@ def forward(self, *args, **kwargs): if args.task_name == "mnli" else "validation"] assert train_dataset.num_rows == teacher_train_dataset.num_rows and \ eval_dataset.num_rows == teacher_eval_dataset.num_rows, \ - "Length of train or evaluation dataset of teacher doesnot match that of student." + "Length of train or evaluation dataset of teacher does not match that of student." # get logits of teacher model if args.loss_weights[1] > 0: diff --git a/examples/pytorch/nlp/huggingface_models/text-classification/optimization_pipeline/prune_once_for_all/fx/README.md b/examples/pytorch/nlp/huggingface_models/text-classification/optimization_pipeline/prune_once_for_all/fx/README.md index 52d1ba575cc..c1dd88f49c1 100644 --- a/examples/pytorch/nlp/huggingface_models/text-classification/optimization_pipeline/prune_once_for_all/fx/README.md +++ b/examples/pytorch/nlp/huggingface_models/text-classification/optimization_pipeline/prune_once_for_all/fx/README.md @@ -99,11 +99,11 @@ python run_glue_no_trainer_pruneOFA.py --task_name qnli \ --resume /path/to/stage1_output_dir/best_model.pt --pad_to_max_length ``` -We supporte Distributed Data Parallel training on single node and multi nodes settings. To use Distributed Data Parallel to speedup training, the bash command needs a small adjustment. +We support Distributed Data Parallel training on single node and multi-node settings. To use Distributed Data Parallel to speed up training, the bash command needs a small adjustment.
For example, the bash command of stage 1 for the SST2 task will look like the following, where *``* is the address of the master node (it won't be necessary for the single node case), *``* is the desired number of processes to use in the current node (for a node with GPUs, usually set to the number of GPUs in this node; for a node without GPUs that uses the CPU for training, it's recommended to set it to 1), *``* is the number of nodes to use, and *``* is the rank of the current node, where the rank starts from 0 and goes up to *``*`-1`.
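For completeness, inside each launched process the training script wraps the model with DistributedDataParallel so gradients are averaged across workers. A self-contained, single-process sketch of that wrapping (the linear layer is a hypothetical stand-in for the fine-tuned classifier, and the address/port values are placeholders):

```python
import os
import torch
import torch.distributed as dist
from torch.nn.parallel import DistributedDataParallel as DDP

# Single-process stand-in for one launched worker, for illustration only.
os.environ.setdefault("MASTER_ADDR", "127.0.0.1")
os.environ.setdefault("MASTER_PORT", "29500")
dist.init_process_group(backend="gloo", rank=0, world_size=1)

model = torch.nn.Linear(768, 2)  # hypothetical stand-in for the GLUE classifier head
ddp_model = DDP(model)           # no device_ids given, so it stays on CPU,
                                 # matching the --no_cuda case described above
loss = ddp_model(torch.randn(4, 768)).sum()
loss.backward()                  # gradient all-reduce happens during backward
dist.destroy_process_group()
```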
-Also please aware that using CPU for training in each node with multi nodes settings, argument `--no_cuda` is mandatory. In multi-nodes setting, the following command needs to be lanuched in each node, and all the commands should be the same except for *``*, which should be integer from 0 to *``*`-1` assigned to each node. +Also please be aware that when using CPU for training in each node under the multi-node setting, the argument `--no_cuda` is mandatory. In the multi-node setting, the following command needs to be launched in each node, and all the commands should be the same except for *``*, which should be an integer from 0 to *``*`-1` assigned to each node. ```bash python -m torch.distributed.launch --master_addr= --nproc_per_node= --nnodes= --node_rank= \ diff --git a/examples/pytorch/nlp/huggingface_models/text-classification/quantization/qat/fx/README.md b/examples/pytorch/nlp/huggingface_models/text-classification/quantization/qat/fx/README.md index 7f52de526aa..d13a2f344dc 100644 --- a/examples/pytorch/nlp/huggingface_models/text-classification/quantization/qat/fx/README.md +++ b/examples/pytorch/nlp/huggingface_models/text-classification/quantization/qat/fx/README.md @@ -31,7 +31,7 @@ pip install -r requirements.txt The changes made are as follows: * edit run_glue.py: - For quantization, we used neural_compressor in it. - - For training, we enbaled early stop strategy. + - For training, we enabled the early stop strategy. ## 2. To get tuned model and its accuracy: ```shell bash run_quant.sh --input_model=./bert_model --output_model=./saved_results diff --git a/examples/pytorch/nlp/huggingface_models/translation/pruning/eager/run_translation_no_trainer.py b/examples/pytorch/nlp/huggingface_models/translation/pruning/eager/run_translation_no_trainer.py index 8f59160fee9..fc2036056ab 100644 --- a/examples/pytorch/nlp/huggingface_models/translation/pruning/eager/run_translation_no_trainer.py +++ b/examples/pytorch/nlp/huggingface_models/translation/pruning/eager/run_translation_no_trainer.py @@ -666,7 +666,7 @@ def main(): prefix = args.source_prefix if args.source_prefix is not None else "" # Preprocessing the datasets. - # Preprocessing is slighlty different for training and evaluation. + # Preprocessing is slightly different for training and evaluation.
column_names = raw_datasets["train"].column_names diff --git a/examples/pytorch/object_detection/ssd_resnet34/quantization/ptq/fx/python/coco.py b/examples/pytorch/object_detection/ssd_resnet34/quantization/ptq/fx/python/coco.py index 653aa9ba199..3235819d797 100644 --- a/examples/pytorch/object_detection/ssd_resnet34/quantization/ptq/fx/python/coco.py +++ b/examples/pytorch/object_detection/ssd_resnet34/quantization/ptq/fx/python/coco.py @@ -202,7 +202,7 @@ def finalize(self, result_dict, ds=None, output_dir=None): image_idx = int(detection[0]) if image_idx != self.content_ids[batch]: # working with the coco index/id is error prone - extra check to make sure it is consistent - log.error("image_idx missmatch, lg={} / result={}".format(image_idx, self.content_ids[batch])) + log.error("image_idx mismatch, lg={} / result={}".format(image_idx, self.content_ids[batch])) # map the index to the coco image id detection[0] = ds.image_ids[image_idx] height, width = ds.image_sizes[image_idx] diff --git a/examples/pytorch/object_detection/ssd_resnet34/quantization/ptq/fx/python/models/base_model_r34.py b/examples/pytorch/object_detection/ssd_resnet34/quantization/ptq/fx/python/models/base_model_r34.py index ea224a7caa2..4efcc58c602 100644 --- a/examples/pytorch/object_detection/ssd_resnet34/quantization/ptq/fx/python/models/base_model_r34.py +++ b/examples/pytorch/object_detection/ssd_resnet34/quantization/ptq/fx/python/models/base_model_r34.py @@ -87,7 +87,7 @@ def __init__(self, scale=20, chan_num=512): nn.Parameter(torch.Tensor([scale]*chan_num).view(1, chan_num, 1, 1)) def forward(self, data): - # normalize accross channel + # normalize across channel return self.scale*data*data.pow(2).sum(dim=1, keepdim=True).clamp(min=1e-12).rsqrt() @@ -109,7 +109,7 @@ def tailor_module(src_model, src_dir, tgt_model, tgt_dir): state[k2] = src_state[k1] #diff_keys = state.keys() - target_model.state_dict().keys() #print("Different Keys:", diff_keys) - # Remove unecessary keys + # Remove unnecessary keys #for k in diff_keys: # state.pop(k) tgt_model.load_state_dict(state) @@ -136,7 +136,7 @@ def make_layers(cfg, batch_norm=False): class Loss(nn.Module): """ - Implements the loss as the sum of the followings: + Implements the loss as the sum of the following: 1. Confidence Loss: All labels, with hard negative mining 2. 
Localization Loss: Only on positive labels Suppose input dboxes has the shape 8732x4 @@ -182,7 +182,7 @@ def forward(self, ploc, plabel, gloc, glabel): # hard negative mining con = self.con_loss(plabel, glabel) - # postive mask will never selected + # positive mask will never be selected con_neg = con.clone() con_neg[mask] = 0 _, con_idx = con_neg.sort(dim=1, descending=True) diff --git a/examples/pytorch/object_detection/ssd_resnet34/quantization/ptq/fx/python/models/ssd_r34.py b/examples/pytorch/object_detection/ssd_resnet34/quantization/ptq/fx/python/models/ssd_r34.py index c9c88048e96..1d7522acbe8 100644 --- a/examples/pytorch/object_detection/ssd_resnet34/quantization/ptq/fx/python/models/ssd_r34.py +++ b/examples/pytorch/object_detection/ssd_resnet34/quantization/ptq/fx/python/models/ssd_r34.py @@ -11,7 +11,7 @@ class Encoder(object): """ - Transform between (bboxes, lables) <-> SSD output + Transform between (bboxes, labels) <-> SSD output dboxes: default boxes in size 8732 x 4, encoder: input ltrb format, output xywh format @@ -247,7 +247,7 @@ def __init__(self, label_num=81, backbone='resnet34', model_path="./resnet34-333 self._build_additional_features(self.out_chan) self.extract_shapes=extract_shapes # after l2norm, conv7, conv8_2, conv9_2, conv10_2, conv11_2 - # classifer 1, 2, 3, 4, 5 ,6 + # classifier 1, 2, 3, 4, 5, 6 self.num_defaults = [4, 6, 6, 6, 4, 4] self.loc = [] diff --git a/examples/pytorch/object_detection/ssd_resnet34/quantization/ptq/fx/tools/pylintrc b/examples/pytorch/object_detection/ssd_resnet34/quantization/ptq/fx/tools/pylintrc index 955e353049d..c1152f77481 100644 --- a/examples/pytorch/object_detection/ssd_resnet34/quantization/ptq/fx/tools/pylintrc +++ b/examples/pytorch/object_detection/ssd_resnet34/quantization/ptq/fx/tools/pylintrc @@ -34,7 +34,7 @@ enable=indexing-exception,old-raise-syntax # can either give multiple identifiers separated by comma (,) or put this # option multiple times (only on the command line, not in the configuration # file where it should appear only once). You can also use "--disable=all" to -# disable everything first and then reenable specific checks. For example, if +# disable everything first and then re-enable specific checks. For example, if # you want to run only the similarities checker, you can use "--disable=all # --enable=similarities".
If you want to run only the classes checker, but have # no Warning level messages displayed, use "--disable=all --enable=classes diff --git a/examples/pytorch/object_detection/ssd_resnet34/quantization/ptq/ipex/base_model.py b/examples/pytorch/object_detection/ssd_resnet34/quantization/ptq/ipex/base_model.py index 11e4905862a..fd24aaa63f8 100644 --- a/examples/pytorch/object_detection/ssd_resnet34/quantization/ptq/ipex/base_model.py +++ b/examples/pytorch/object_detection/ssd_resnet34/quantization/ptq/ipex/base_model.py @@ -108,7 +108,7 @@ def __init__(self, scale=20, chan_num=512): nn.Parameter(torch.Tensor([scale]*chan_num).view(1, chan_num, 1, 1)) def forward(self, data): - # normalize accross channel + # normalize across channel return self.scale*data*data.pow(2).sum(dim=1, keepdim=True).clamp(min=1e-12).rsqrt() @@ -130,7 +130,7 @@ def tailor_module(src_model, src_dir, tgt_model, tgt_dir): state[k2] = src_state[k1] #diff_keys = state.keys() - target_model.state_dict().keys() #print("Different Keys:", diff_keys) - # Remove unecessary keys + # Remove unnecessary keys #for k in diff_keys: # state.pop(k) tgt_model.load_state_dict(state) @@ -157,7 +157,7 @@ def make_layers(cfg, batch_norm=False): class Loss(nn.Module): """ - Implements the loss as the sum of the followings: + Implements the loss as the sum of the following: 1. Confidence Loss: All labels, with hard negative mining 2. Localization Loss: Only on positive labels Suppose input dboxes has the shape 8732x4 @@ -203,7 +203,7 @@ def forward(self, ploc, plabel, gloc, glabel): # hard negative mining con = self.con_loss(plabel, glabel) - # postive mask will never selected + # positive mask will never be selected con_neg = con.clone() con_neg[mask] = 0 _, con_idx = con_neg.sort(dim=1, descending=True) diff --git a/examples/pytorch/object_detection/ssd_resnet34/quantization/ptq/ipex/infer_weight_sharing.py b/examples/pytorch/object_detection/ssd_resnet34/quantization/ptq/ipex/infer_weight_sharing.py index 0657dc48d87..5ea72b149e6 100644 --- a/examples/pytorch/object_detection/ssd_resnet34/quantization/ptq/ipex/infer_weight_sharing.py +++ b/examples/pytorch/object_detection/ssd_resnet34/quantization/ptq/ipex/infer_weight_sharing.py @@ -239,7 +239,7 @@ def coco_eval(model, val_dataloader, cocoGt, encoder, inv_map, args): model_decode.model.model = optimization.fuse(model_decode.model.model, inplace=False) if args.calibration: - print("runing int8 LLGA calibration step not support in throughput benchmark") + print("running int8 LLGA calibration step is not supported in throughput benchmark") else: print("INT8 LLGA start trace") # insert quant/dequant based on configure.json diff --git a/examples/pytorch/object_detection/ssd_resnet34/quantization/ptq/ipex/ssd300.py b/examples/pytorch/object_detection/ssd_resnet34/quantization/ptq/ipex/ssd300.py index cdc17acd724..62446aaa048 100644 --- a/examples/pytorch/object_detection/ssd_resnet34/quantization/ptq/ipex/ssd300.py +++ b/examples/pytorch/object_detection/ssd_resnet34/quantization/ptq/ipex/ssd300.py @@ -28,7 +28,7 @@ def __init__(self, label_num, backbone='resnet34', model_path="./resnet34-333f7e self._build_additional_features(out_size, self.out_chan) # after l2norm, conv7, conv8_2, conv9_2, conv10_2, conv11_2 - # classifer 1, 2, 3, 4, 5 ,6 + # classifier 1, 2, 3, 4, 5, 6 self.num_defaults = [4, 6, 6, 6, 4, 4] self.loc = [] diff --git a/examples/pytorch/object_detection/ssd_resnet34/quantization/ptq/ipex/ssd_r34.py b/examples/pytorch/object_detection/ssd_resnet34/quantization/ptq/ipex/ssd_r34.py
index 4e2db16cb99..3390e61f24c 100644 --- a/examples/pytorch/object_detection/ssd_resnet34/quantization/ptq/ipex/ssd_r34.py +++ b/examples/pytorch/object_detection/ssd_resnet34/quantization/ptq/ipex/ssd_r34.py @@ -52,7 +52,7 @@ def __init__(self, label_num, backbone='resnet34', model_path="./resnet34-333f7e self.additional_blocks = self._build_additional_features(self.out_chan) # after l2norm, conv7, conv8_2, conv9_2, conv10_2, conv11_2 - # classifer 1, 2, 3, 4, 5 ,6 + # classifier 1, 2, 3, 4, 5, 6 self.num_defaults = [4, 6, 6, 6, 4, 4] diff --git a/examples/pytorch/object_detection/ssd_resnet34/quantization/ptq/ipex/utils.py b/examples/pytorch/object_detection/ssd_resnet34/quantization/ptq/ipex/utils.py index 97d73022c40..a4029314839 100644 --- a/examples/pytorch/object_detection/ssd_resnet34/quantization/ptq/ipex/utils.py +++ b/examples/pytorch/object_detection/ssd_resnet34/quantization/ptq/ipex/utils.py @@ -89,7 +89,7 @@ def calc_iou_tensor(box1, box2): class Encoder(object): """ Inspired by https://github.com/kuangliu/pytorch-ssd - Transform between (bboxes, lables) <-> SSD output + Transform between (bboxes, labels) <-> SSD output dboxes: default boxes in size 8732 x 4, encoder: input ltrb format, output xywh format diff --git a/examples/pytorch/object_detection/ssd_resnet34/quantization/qat/fx/ssd/base_model.py b/examples/pytorch/object_detection/ssd_resnet34/quantization/qat/fx/ssd/base_model.py index 9402d5d3314..2efa74b63bc 100644 --- a/examples/pytorch/object_detection/ssd_resnet34/quantization/qat/fx/ssd/base_model.py +++ b/examples/pytorch/object_detection/ssd_resnet34/quantization/qat/fx/ssd/base_model.py @@ -89,7 +89,7 @@ def __init__(self, scale=20, chan_num=512): nn.Parameter(torch.Tensor([scale]*chan_num).view(1, chan_num, 1, 1)) def forward(self, data): - # normalize accross channel + # normalize across channel return self.scale*data*data.pow(2).sum(dim=1, keepdim=True).clamp(min=1e-12).rsqrt() @@ -111,7 +111,7 @@ def tailor_module(src_model, src_dir, tgt_model, tgt_dir): state[k2] = src_state[k1] #diff_keys = state.keys() - target_model.state_dict().keys() #print("Different Keys:", diff_keys) - # Remove unecessary keys + # Remove unnecessary keys #for k in diff_keys: # state.pop(k) tgt_model.load_state_dict(state) @@ -138,7 +138,7 @@ def make_layers(cfg, batch_norm=False): class Loss(nn.Module): """ - Implements the loss as the sum of the followings: + Implements the loss as the sum of the following: 1. Confidence Loss: All labels, with hard negative mining 2.
Localization Loss: Only on positive labels Suppose input dboxes has the shape 8732x4 @@ -186,7 +186,7 @@ def forward(self, ploc, plabel, gloc, glabel): # hard negative mining con = self.con_loss(plabel, glabel) - # postive mask will never selected + # positive mask will never be selected con_neg = con.clone() con_neg[mask] = 0 _, con_idx = con_neg.sort(dim=1, descending=True) diff --git a/examples/pytorch/object_detection/ssd_resnet34/quantization/qat/fx/ssd/ssd300.py b/examples/pytorch/object_detection/ssd_resnet34/quantization/qat/fx/ssd/ssd300.py index 89d4f2c1e9c..83c364201a0 100644 --- a/examples/pytorch/object_detection/ssd_resnet34/quantization/qat/fx/ssd/ssd300.py +++ b/examples/pytorch/object_detection/ssd_resnet34/quantization/qat/fx/ssd/ssd300.py @@ -27,7 +27,7 @@ def __init__(self, label_num, backbone='resnet34', model_path=None): self._build_additional_features(out_size, self.out_chan) # after l2norm, conv7, conv8_2, conv9_2, conv10_2, conv11_2 - # classifer 1, 2, 3, 4, 5 ,6 + # classifier 1, 2, 3, 4, 5, 6 self.num_defaults = [4, 6, 6, 6, 4, 4] self.loc = [] diff --git a/examples/pytorch/object_detection/ssd_resnet34/quantization/qat/fx/ssd/utils.py b/examples/pytorch/object_detection/ssd_resnet34/quantization/qat/fx/ssd/utils.py index 22235165da2..713f8becd84 100644 --- a/examples/pytorch/object_detection/ssd_resnet34/quantization/qat/fx/ssd/utils.py +++ b/examples/pytorch/object_detection/ssd_resnet34/quantization/qat/fx/ssd/utils.py @@ -62,7 +62,7 @@ def calc_iou_tensor(box1, box2): class Encoder(object): """ Inspired by https://github.com/kuangliu/pytorch-ssd - Transform between (bboxes, lables) <-> SSD output + Transform between (bboxes, labels) <-> SSD output dboxes: default boxes in size 8732 x 4, encoder: input ltrb format, output xywh format diff --git a/examples/pytorch/object_detection/yolo_v3/quantization/ptq_static/fx/detect.py b/examples/pytorch/object_detection/yolo_v3/quantization/ptq_static/fx/detect.py index f23fbc8296b..04339587c42 100644 --- a/examples/pytorch/object_detection/yolo_v3/quantization/ptq_static/fx/detect.py +++ b/examples/pytorch/object_detection/yolo_v3/quantization/ptq_static/fx/detect.py @@ -28,7 +28,7 @@ parser.add_argument("--weights_path", type=str, default="weights/yolov3.weights", help="path to weights file") parser.add_argument("--class_path", type=str, default="data/coco.names", help="path to class label file") parser.add_argument("--conf_thres", type=float, default=0.8, help="object confidence threshold") - parser.add_argument("--nms_thres", type=float, default=0.4, help="iou thresshold for non-maximum suppression") + parser.add_argument("--nms_thres", type=float, default=0.4, help="iou threshold for non-maximum suppression") parser.add_argument("--batch_size", type=int, default=1, help="size of the batches") parser.add_argument("--n_cpu", type=int, default=0, help="number of cpu threads to use during batch generation") parser.add_argument("--img_size", type=int, default=416, help="size of each image dimension") diff --git a/examples/pytorch/object_detection/yolo_v3/quantization/ptq_static/fx/test.py b/examples/pytorch/object_detection/yolo_v3/quantization/ptq_static/fx/test.py index 7be0e472055..f7b503e447e 100644 --- a/examples/pytorch/object_detection/yolo_v3/quantization/ptq_static/fx/test.py +++ b/examples/pytorch/object_detection/yolo_v3/quantization/ptq_static/fx/test.py @@ -103,7 +103,7 @@ def evaluate(model, path, iou_thres, conf_thres, nms_thres, img_size, batch_size parser.add_argument("--class_path", type=str,
default="data/coco.names", help="path to class label file") parser.add_argument("--iou_thres", type=float, default=0.5, help="iou threshold required to qualify as detected") parser.add_argument("--conf_thres", type=float, default=0.001, help="object confidence threshold") - parser.add_argument("--nms_thres", type=float, default=0.5, help="iou thresshold for non-maximum suppression") + parser.add_argument("--nms_thres", type=float, default=0.5, help="iou threshold for non-maximum suppression") parser.add_argument("--n_cpu", type=int, default=8, help="number of cpu threads to use during batch generation") parser.add_argument("--img_size", type=int, default=416, help="size of each image dimension") parser.add_argument('-t', '--tune', dest='tune', action='store_true', diff --git a/examples/pytorch/object_detection/yolo_v3/quantization/ptq_static/fx/utils/utils.py b/examples/pytorch/object_detection/yolo_v3/quantization/ptq_static/fx/utils/utils.py index 5eb2a1da8c6..424db374475 100644 --- a/examples/pytorch/object_detection/yolo_v3/quantization/ptq_static/fx/utils/utils.py +++ b/examples/pytorch/object_detection/yolo_v3/quantization/ptq_static/fx/utils/utils.py @@ -205,7 +205,7 @@ def bbox_iou(box1, box2, x1y1x2y2=True): b1_x1, b1_y1, b1_x2, b1_y2 = box1[:, 0], box1[:, 1], box1[:, 2], box1[:, 3] b2_x1, b2_y1, b2_x2, b2_y2 = box2[:, 0], box2[:, 1], box2[:, 2], box2[:, 3] - # get the corrdinates of the intersection rectangle + # get the coordinates of the intersection rectangle inter_rect_x1 = torch.max(b1_x1, b2_x1) inter_rect_y1 = torch.max(b1_y1, b2_y1) inter_rect_x2 = torch.min(b1_x2, b2_x2) diff --git a/examples/pytorch/recommendation/dlrm/quantization/ptq/fx/data_utils.py b/examples/pytorch/recommendation/dlrm/quantization/ptq/fx/data_utils.py index daeeb15dd13..00199cbcb4b 100644 --- a/examples/pytorch/recommendation/dlrm/quantization/ptq/fx/data_utils.py +++ b/examples/pytorch/recommendation/dlrm/quantization/ptq/fx/data_utils.py @@ -1046,7 +1046,7 @@ def process_one_file( # create all splits (reuse existing files if possible) recreate_flag = False convertDicts = [{} for _ in range(26)] - # WARNING: to get reproducable sub-sampling results you must reset the seed below + # WARNING: to get reproducible sub-sampling results you must reset the seed below # np.random.seed(123) # in this case there is a single split in each day for i in range(days): diff --git a/examples/pytorch/recommendation/dlrm/quantization/ptq/fx/dlrm_data_pytorch.py b/examples/pytorch/recommendation/dlrm/quantization/ptq/fx/dlrm_data_pytorch.py index 6cbe382ade3..748947065cf 100644 --- a/examples/pytorch/recommendation/dlrm/quantization/ptq/fx/dlrm_data_pytorch.py +++ b/examples/pytorch/recommendation/dlrm/quantization/ptq/fx/dlrm_data_pytorch.py @@ -266,7 +266,7 @@ def __getitem__(self, index): if self.memory_map: if self.split == 'none' or self.split == 'train': - # check if need to swicth to next day and load data + # check if need to switch to next day and load data if index == self.offset_per_file[self.day]: # print("day_boundary switch", index) self.day_boundary = self.offset_per_file[self.day] @@ -519,7 +519,7 @@ def make_criteo_data_and_loaders(args): return train_data, train_loader, test_data, test_loader -# uniform ditribution (input data) +# uniform distribution (input data) class RandomDataset(Dataset): def __init__( @@ -732,7 +732,7 @@ def generate_random_output_batch(n, num_targets, round_targets=False): return torch.tensor(P) -# uniform ditribution (input data) +# uniform distribution (input data) def 
generate_uniform_input_batch( m_den, ln_emb, diff --git a/examples/pytorch/recommendation/dlrm/quantization/ptq/ipex/data_utils.py b/examples/pytorch/recommendation/dlrm/quantization/ptq/ipex/data_utils.py index bf76dfffafd..6ceef9517df 100644 --- a/examples/pytorch/recommendation/dlrm/quantization/ptq/ipex/data_utils.py +++ b/examples/pytorch/recommendation/dlrm/quantization/ptq/ipex/data_utils.py @@ -1079,7 +1079,7 @@ def process_one_file( # create all splits (reuse existing files if possible) recreate_flag = False convertDicts = [{} for _ in range(26)] - # WARNING: to get reproducable sub-sampling results you must reset the seed below + # WARNING: to get reproducible sub-sampling results you must reset the seed below # np.random.seed(123) # in this case there is a single split in each day for i in range(days): diff --git a/examples/pytorch/recommendation/dlrm/quantization/ptq/ipex/dlrm_data_pytorch.py b/examples/pytorch/recommendation/dlrm/quantization/ptq/ipex/dlrm_data_pytorch.py index 93890beab65..f6f30f8e663 100644 --- a/examples/pytorch/recommendation/dlrm/quantization/ptq/ipex/dlrm_data_pytorch.py +++ b/examples/pytorch/recommendation/dlrm/quantization/ptq/ipex/dlrm_data_pytorch.py @@ -281,7 +281,7 @@ def __getitem__(self, index): if self.memory_map: if self.split == 'none' or self.split == 'train': - # check if need to swicth to next day and load data + # check if need to switch to next day and load data if index == self.offset_per_file[self.day]: # print("day_boundary switch", index) self.day_boundary = self.offset_per_file[self.day] diff --git a/examples/tensorflow/nlp/bert_base_mrpc/quantization/ptq/tokenization.py b/examples/tensorflow/nlp/bert_base_mrpc/quantization/ptq/tokenization.py index 0f4b22ebe5a..c49fa4b0d10 100644 --- a/examples/tensorflow/nlp/bert_base_mrpc/quantization/ptq/tokenization.py +++ b/examples/tensorflow/nlp/bert_base_mrpc/quantization/ptq/tokenization.py @@ -361,7 +361,7 @@ def tokenize(self, text): def _is_whitespace(char): """Checks whether `chars` is a whitespace character.""" - # \t, \n, and \r are technically contorl characters but we treat them + # \t, \n, and \r are technically control characters but we treat them # as whitespace since they are generally considered as such. if char == " " or char == "\t" or char == "\n" or char == "\r": return True diff --git a/examples/tensorflow/nlp/bert_large_squad/quantization/ptq/freeze_estimator_to_pb.py b/examples/tensorflow/nlp/bert_large_squad/quantization/ptq/freeze_estimator_to_pb.py index b3737a6d5ab..519d22ac410 100644 --- a/examples/tensorflow/nlp/bert_large_squad/quantization/ptq/freeze_estimator_to_pb.py +++ b/examples/tensorflow/nlp/bert_large_squad/quantization/ptq/freeze_estimator_to_pb.py @@ -15,7 +15,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
# -"""Freeze estimator to frozen pb for bert full pipline tuning.""" +"""Freeze estimator to frozen pb for bert full pipeline tuning.""" import os import modeling diff --git a/examples/tensorflow/nlp/bert_large_squad/quantization/ptq/tokenization.py b/examples/tensorflow/nlp/bert_large_squad/quantization/ptq/tokenization.py index 75662a1de6b..dbf083617f5 100644 --- a/examples/tensorflow/nlp/bert_large_squad/quantization/ptq/tokenization.py +++ b/examples/tensorflow/nlp/bert_large_squad/quantization/ptq/tokenization.py @@ -364,7 +364,7 @@ def tokenize(self, text): def _is_whitespace(char): """Checks whether `chars` is a whitespace character.""" - # \t, \n, and \r are technically contorl characters but we treat them + # \t, \n, and \r are technically control characters but we treat them # as whitespace since they are generally considered as such. if char == " " or char == "\t" or char == "\n" or char == "\r": return True diff --git a/examples/tensorflow/nlp/bert_large_squad_model_zoo/quantization/ptq/freeze_estimator_to_pb.py b/examples/tensorflow/nlp/bert_large_squad_model_zoo/quantization/ptq/freeze_estimator_to_pb.py index b3737a6d5ab..519d22ac410 100644 --- a/examples/tensorflow/nlp/bert_large_squad_model_zoo/quantization/ptq/freeze_estimator_to_pb.py +++ b/examples/tensorflow/nlp/bert_large_squad_model_zoo/quantization/ptq/freeze_estimator_to_pb.py @@ -15,7 +15,7 @@ # See the License for the specific language governing permissions and # limitations under the License. # -"""Freeze estimator to frozen pb for bert full pipline tuning.""" +"""Freeze estimator to frozen pb for bert full pipeline tuning.""" import os import modeling diff --git a/examples/tensorflow/nlp/bert_large_squad_model_zoo/quantization/ptq/tokenization.py b/examples/tensorflow/nlp/bert_large_squad_model_zoo/quantization/ptq/tokenization.py index 75662a1de6b..dbf083617f5 100644 --- a/examples/tensorflow/nlp/bert_large_squad_model_zoo/quantization/ptq/tokenization.py +++ b/examples/tensorflow/nlp/bert_large_squad_model_zoo/quantization/ptq/tokenization.py @@ -364,7 +364,7 @@ def tokenize(self, text): def _is_whitespace(char): """Checks whether `chars` is a whitespace character.""" - # \t, \n, and \r are technically contorl characters but we treat them + # \t, \n, and \r are technically control characters but we treat them # as whitespace since they are generally considered as such. if char == " " or char == "\t" or char == "\n" or char == "\r": return True diff --git a/examples/tensorflow/nlp/transformer_lt/quantization/ptq/utils/tokenizer.py b/examples/tensorflow/nlp/transformer_lt/quantization/ptq/utils/tokenizer.py index 20302266acc..33f144b23fd 100644 --- a/examples/tensorflow/nlp/transformer_lt/quantization/ptq/utils/tokenizer.py +++ b/examples/tensorflow/nlp/transformer_lt/quantization/ptq/utils/tokenizer.py @@ -301,7 +301,7 @@ def match(m): replacement ('_' and '\') are returned. Note that in python, a single backslash is written as '\\', and double backslash as '\\\\'. - If m.goup(1) exists, then use the integer in m.group(1) to return a + If m.group(1) exists, then use the integer in m.group(1) to return a unicode character. Args: @@ -507,7 +507,7 @@ def _gen_new_subtoken_list( subtoken_counts, min_count, alphabet, reserved_tokens=None): """Generate candidate subtokens ordered by count, and new max subtoken length. - Add subtokens to the candiate list in order of length (longest subtokens + Add subtokens to the candidate list in order of length (longest subtokens first). 
When a subtoken is added, the counts of each of its prefixes are decreased. Prefixes that don't appear much outside the subtoken are not added to the candidate list. @@ -525,7 +525,7 @@ def _gen_new_subtoken_list( Args: subtoken_counts: defaultdict mapping str subtokens to int counts - min_count: int minumum count requirement for subtokens + min_count: int minimum count requirement for subtokens alphabet: set of characters. Each character is added to the subtoken list to guarantee that all tokens can be encoded. reserved_tokens: list of tokens that will be added to the beginning of the diff --git a/examples/tensorflow/nlp/transformer_lt_mlperf/quantization/ptq/mlperf_compliance/_maskrcnn_tags.py b/examples/tensorflow/nlp/transformer_lt_mlperf/quantization/ptq/mlperf_compliance/_maskrcnn_tags.py index 1ec5ecdf112..1ca4ee4ea76 100644 --- a/examples/tensorflow/nlp/transformer_lt_mlperf/quantization/ptq/mlperf_compliance/_maskrcnn_tags.py +++ b/examples/tensorflow/nlp/transformer_lt_mlperf/quantization/ptq/mlperf_compliance/_maskrcnn_tags.py @@ -37,7 +37,7 @@ BATCH_SIZE_TEST = "batch_size_test" -# Pretrained classifer model +# Pretrained classifier model BACKBONE = "backbone" # Anchor aspect ratio diff --git a/examples/tensorflow/nlp/transformer_lt_mlperf/quantization/ptq/mlperf_compliance/_ncf_tags.py b/examples/tensorflow/nlp/transformer_lt_mlperf/quantization/ptq/mlperf_compliance/_ncf_tags.py index a1235b818da..e14e7197c9f 100644 --- a/examples/tensorflow/nlp/transformer_lt_mlperf/quantization/ptq/mlperf_compliance/_ncf_tags.py +++ b/examples/tensorflow/nlp/transformer_lt_mlperf/quantization/ptq/mlperf_compliance/_ncf_tags.py @@ -29,7 +29,7 @@ PREPROC_HP_SAMPLE_EVAL_REPLACEMENT = "preproc_hp_sample_eval_replacement" -# The number of false negatives per postive generated during training. +# The number of false negatives per positive generated during training. INPUT_HP_NUM_NEG = "input_hp_num_neg" # Are training negatives sampled with replacement? diff --git a/examples/tensorflow/nlp/transformer_lt_mlperf/quantization/ptq/mlperf_compliance/_ssd_tags.py b/examples/tensorflow/nlp/transformer_lt_mlperf/quantization/ptq/mlperf_compliance/_ssd_tags.py index f1a87bea76a..605bf0b78f7 100644 --- a/examples/tensorflow/nlp/transformer_lt_mlperf/quantization/ptq/mlperf_compliance/_ssd_tags.py +++ b/examples/tensorflow/nlp/transformer_lt_mlperf/quantization/ptq/mlperf_compliance/_ssd_tags.py @@ -20,7 +20,7 @@ from __future__ import print_function -# Pretrained classifer model +# Pretrained classifier model BACKBONE = "backbone" FEATURE_SIZES = "feature_sizes" diff --git a/examples/tensorflow/nlp/transformer_lt_mlperf/quantization/ptq/mlperf_compliance/mlperf_log.py b/examples/tensorflow/nlp/transformer_lt_mlperf/quantization/ptq/mlperf_compliance/mlperf_log.py index afe340ba032..a6c2ded71a7 100644 --- a/examples/tensorflow/nlp/transformer_lt_mlperf/quantization/ptq/mlperf_compliance/mlperf_log.py +++ b/examples/tensorflow/nlp/transformer_lt_mlperf/quantization/ptq/mlperf_compliance/mlperf_log.py @@ -86,8 +86,8 @@ def _mlperf_print(key, value=None, benchmark=None, stack_offset=0, value: The value which contains no newlines. benchmark: The short code for the benchmark being run, see the MLPerf log spec. stack_offset: Increase the value to go deeper into the stack to find the callsite. For example, if this - is being called by a wraper/helper you may want to set stack_offset=1 to use the callsite - of the wraper/helper itself. 
+ is being called by a wrapper/helper you may want to set stack_offset=1 to use the callsite + of the wrapper/helper itself. tag_set: The set of tags in which key must belong. deferred: The value is not presently known. In that case, a unique ID will be assigned as the value of this call and will be returned. The diff --git a/examples/tensorflow/nlp/transformer_lt_mlperf/quantization/ptq/mlperf_compliance/tags.py b/examples/tensorflow/nlp/transformer_lt_mlperf/quantization/ptq/mlperf_compliance/tags.py index 94bd4bb2797..5f82954929f 100644 --- a/examples/tensorflow/nlp/transformer_lt_mlperf/quantization/ptq/mlperf_compliance/tags.py +++ b/examples/tensorflow/nlp/transformer_lt_mlperf/quantization/ptq/mlperf_compliance/tags.py @@ -77,7 +77,7 @@ # This tag signals that a submission has reached the relevant stopping criteria, # and has completed all tasks which are performed in the reference. The wall # time for a submission will be computed as the difference between the time -# when this tag is emitted and the time whe the RUN_START is emitted. +# when this tag is emitted and the time when the RUN_START is emitted. RUN_STOP = "run_stop" # This tag should be emitted immediately before ending a run, and should be the diff --git a/examples/tensorflow/nlp/transformer_lt_mlperf/quantization/ptq/run_inference.py b/examples/tensorflow/nlp/transformer_lt_mlperf/quantization/ptq/run_inference.py index 30f8f80d024..198d050bb0b 100644 --- a/examples/tensorflow/nlp/transformer_lt_mlperf/quantization/ptq/run_inference.py +++ b/examples/tensorflow/nlp/transformer_lt_mlperf/quantization/ptq/run_inference.py @@ -143,7 +143,7 @@ def bleu_tokenize(string): except when a punctuation is preceded and followed by a digit (e.g. a comma/dot as a thousand/decimal separator). - Note that a numer (e.g. a year) followed by a dot at the end of sentence + Note that a number (e.g. a year) followed by a dot at the end of sentence is NOT tokenized, i.e. the dot stays with the number because `s/(\p{P})(\P{N})/ $1 $2/g` does not match this case (unless we add a space after each sentence). diff --git a/examples/tensorflow/nlp/transformer_lt_mlperf/quantization/ptq/utils/tokenizer.py b/examples/tensorflow/nlp/transformer_lt_mlperf/quantization/ptq/utils/tokenizer.py index 9951f99f3e9..effb747cdf9 100644 --- a/examples/tensorflow/nlp/transformer_lt_mlperf/quantization/ptq/utils/tokenizer.py +++ b/examples/tensorflow/nlp/transformer_lt_mlperf/quantization/ptq/utils/tokenizer.py @@ -304,7 +304,7 @@ def match(m): replacement ('_' and '\') are returned. Note that in python, a single backslash is written as '\\', and double backslash as '\\\\'. - If m.goup(1) exists, then use the integer in m.group(1) to return a + If m.group(1) exists, then use the integer in m.group(1) to return a unicode character. Args: @@ -503,7 +503,7 @@ def _gen_new_subtoken_list( subtoken_counts, min_count, alphabet, reserved_tokens=None): """Generate candidate subtokens ordered by count, and new max subtoken length. - Add subtokens to the candiate list in order of length (longest subtokens + Add subtokens to the candidate list in order of length (longest subtokens first). When a subtoken is added, the counts of each of its prefixes are decreased. Prefixes that don't appear much outside the subtoken are not added to the candidate list. 
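The longest-first candidate selection described in this docstring is subtle, so a toy re-implementation may help (illustrative only, not the tokenizer's actual code); it shows why accepting a subtoken decrements the counts of its prefixes:

```python
def gen_candidates_sketch(subtoken_counts, min_count):
    """Toy version of the longest-first selection described above."""
    counts = dict(subtoken_counts)
    candidates = []
    max_len = max((len(s) for s in counts), default=0)
    # Walk from the longest subtokens down to single characters.
    for length in range(max_len, 0, -1):
        for subtoken in [s for s in counts if len(s) == length]:
            count = counts[subtoken]
            if count < min_count:
                continue  # prefixes absorbed by longer subtokens drop out here
            candidates.append(subtoken)
            # Decrement every proper prefix: occurrences inside this accepted
            # subtoken should not also count toward the prefix's own candidacy.
            for i in range(1, length):
                prefix = subtoken[:i]
                if prefix in counts:
                    counts[prefix] -= count
    return candidates

# "the" is kept first; "th" then appears only 5 times outside "the" and is
# dropped, while "t" still clears the threshold on its own.
print(gen_candidates_sketch({"the": 100, "th": 105, "t": 120}, 10))  # ['the', 't']
```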
@@ -521,7 +521,7 @@ def _gen_new_subtoken_list( Args: subtoken_counts: defaultdict mapping str subtokens to int counts - min_count: int minumum count requirement for subtokens + min_count: int minimum count requirement for subtokens alphabet: set of characters. Each character is added to the subtoken list to guarantee that all tokens can be encoded. reserved_tokens: list of tokens that will be added to the beginning of the diff --git a/examples/tensorflow/object_detection/tensorflow_models/faster_rcnn_resnet50/quantization/ptq/main.py b/examples/tensorflow/object_detection/tensorflow_models/faster_rcnn_resnet50/quantization/ptq/main.py index 14d1ffabce7..d46f7cd3633 100644 --- a/examples/tensorflow/object_detection/tensorflow_models/faster_rcnn_resnet50/quantization/ptq/main.py +++ b/examples/tensorflow/object_detection/tensorflow_models/faster_rcnn_resnet50/quantization/ptq/main.py @@ -124,4 +124,3 @@ def main(_): if __name__ == "__main__": tf.compat.v1.app.run() - \ No newline at end of file diff --git a/examples/tensorflow/oob_models/quantization/ptq/dataloaders.py b/examples/tensorflow/oob_models/quantization/ptq/dataloaders.py index 282f878f419..35fd270c674 100644 --- a/examples/tensorflow/oob_models/quantization/ptq/dataloaders.py +++ b/examples/tensorflow/oob_models/quantization/ptq/dataloaders.py @@ -37,4 +37,3 @@ def _generate_dataloader(self, dataset, batch_size, last_batch, collate_fn, samp yield data except StopIteration: return - \ No newline at end of file diff --git a/examples/tensorflow/semantic_image_segmentation/3dunet-mlperf/quantization/ptq/nnUNet/preprocess.py b/examples/tensorflow/semantic_image_segmentation/3dunet-mlperf/quantization/ptq/nnUNet/preprocess.py index 681bd5cf09e..048eb0e91cb 100644 --- a/examples/tensorflow/semantic_image_segmentation/3dunet-mlperf/quantization/ptq/nnUNet/preprocess.py +++ b/examples/tensorflow/semantic_image_segmentation/3dunet-mlperf/quantization/ptq/nnUNet/preprocess.py @@ -96,7 +96,7 @@ def preprocess_setup(preprocessed_data_dir): # Preprocess images, returns filenames list # This runs in multiprocess - print("Acually preprocessing data...") + print("Actually preprocessing data...") preprocessed_files = preprocess_MLPerf(model_dir, checkpoint_name, fold, fp16, list_of_lists, validation_files, preprocessed_data_dir, num_threads_preprocessing) diff --git a/neural_compressor/conf/config.py b/neural_compressor/conf/config.py index 52003eb82fb..f7b224028d6 100644 --- a/neural_compressor/conf/config.py +++ b/neural_compressor/conf/config.py @@ -812,7 +812,7 @@ def percent_to_float(data): Optional('approach', default='post_training_static_quant'): And( str, # TODO check if framework supports dynamic quantize - # Now only onnruntime and pytorch supoort + # Now only onnxruntime and pytorch support lambda s: s in ['post_training_static_quant', 'post_training_dynamic_quant', 'post_training_auto_quant', diff --git a/pyproject.toml b/pyproject.toml index 626ee04b1bb..d6b723babd7 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,7 +1,7 @@ [tool.isort] profile = "black" line_length = 120 -known_first_party = ["neural_compressor"] +known_first_party = ["neural_compressor", "neural_insights", "neural_solution"] extend_skip_glob = ["**/__init__.py"] @@ -10,10 +10,10 @@ line-length = 120 [tool.codespell] -skip = '*.po,*.ts,*.js,*.map,*.js.map,*.css.map,.azure-pipelines/scripts/codeScan/pyspelling/inc_dict.txt' +skip = '*.po,*.ts,*.js,*.map,*.js.map,*.css.map,.azure-pipelines/scripts/codeScan/codespell/inc_dict.txt' count = '' quiet-level = 3
-ignore-words = ".azure-pipelines/scripts/codeScan/pyspelling/inc_dict.txt" +ignore-words = ".azure-pipelines/scripts/codeScan/codespell/inc_dict.txt" [tool.ruff] @@ -23,8 +23,8 @@ ignore = [ "E402", # Module level import not at top of file "E501", # Line too long (121 > 120 characters) "E721", # Do not compare types, use isinstance() - "E731", # Do not assign a lambda expression, use a def "E722", # Do not use bare except + "E731", # Do not assign a lambda expression, use a def "E741", # Do not use variables named ‘l’, ‘O’, or ‘I’ "F401", # {name} imported but unused "F403", # from {name} import * used; unable to detect undefined names