From b7dd3fbabfede029c6d31701a00c9af3ff12191f Mon Sep 17 00:00:00 2001 From: Mo Kweon Date: Wed, 14 Jul 2021 13:34:30 -0700 Subject: [PATCH 1/2] feat: add a PR-331 video - Close #165 --- server/internal/data/mapping_table.pbtxt | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/server/internal/data/mapping_table.pbtxt b/server/internal/data/mapping_table.pbtxt index 9231acbb..fb450c4e 100644 --- a/server/internal/data/mapping_table.pbtxt +++ b/server/internal/data/mapping_table.pbtxt @@ -2451,3 +2451,9 @@ rows: { paper_arxiv_ids: "2012.12877" youtube_video_id: "A3RrAIx-KCc" } +rows: { + pr_id: 331 + paper_arxiv_ids: "2106.07998" + paper_arxiv_ids: "1706.04599" + youtube_video_id: "rI-vJuNKyIU" +} From b9100be4b1e91c19dfd744ba217200ae3d24c981 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=EC=BD=94=EB=94=A9=EB=83=84=EB=B9=84?= Date: Wed, 14 Jul 2021 20:38:04 +0000 Subject: [PATCH 2/2] chore: update database --- server/internal/data/database.pbtxt | 229 ++++++++++++++++++++++------ 1 file changed, 180 insertions(+), 49 deletions(-) diff --git a/server/internal/data/database.pbtxt b/server/internal/data/database.pbtxt index 2b5425ce..aea243a1 100644 --- a/server/internal/data/database.pbtxt +++ b/server/internal/data/database.pbtxt @@ -3332,7 +3332,7 @@ pr_id_to_video: { url: "https://github.com/rwightman/pytorch-image-models" owner: "rwightman" framework: FRAMEWORK_PYTORCH - number_of_stars: 11589 + number_of_stars: 11591 description: "PyTorch image models, scripts, pretrained weights -- ResNet, ResNeXT, EfficientNet, EfficientNetV2, NFNet, Vision Transformer, MixNet, MobileNet-V3/V2, RegNet, DPN, CSPNet, and more" } repositories: { @@ -4838,7 +4838,7 @@ pr_id_to_video: { url: "https://github.com/xmu-xiaoma666/External-Attention-pytorch" owner: "xmu-xiaoma666" framework: FRAMEWORK_PYTORCH - number_of_stars: 835 + number_of_stars: 840 description: "🍀 Pytorch implementation of various Attention Mechanisms, MLP, 
Re-parameter, Convolution, which is helpful to further understand papers.⭐⭐⭐" } repositories: { @@ -10194,7 +10194,7 @@ pr_id_to_video: { url: "https://github.com/PaddlePaddle/PaddleSeg" owner: "PaddlePaddle" framework: FRAMEWORK_OTHERS - number_of_stars: 1761 + number_of_stars: 1763 description: "End-to-end image segmentation kit based on PaddlePaddle. " } repositories: { @@ -12290,7 +12290,7 @@ pr_id_to_video: { url: "https://github.com/open-mmlab/mmdetection" owner: "open-mmlab" framework: FRAMEWORK_PYTORCH - number_of_stars: 15627 + number_of_stars: 15628 description: "OpenMMLab Detection Toolbox and Benchmark" } repositories: { @@ -13813,7 +13813,7 @@ pr_id_to_video: { url: "https://github.com/open-mmlab/mmdetection" owner: "open-mmlab" framework: FRAMEWORK_PYTORCH - number_of_stars: 15627 + number_of_stars: 15628 description: "OpenMMLab Detection Toolbox and Benchmark" } repositories: { @@ -15182,7 +15182,7 @@ pr_id_to_video: { url: "https://github.com/rwightman/pytorch-image-models" owner: "rwightman" framework: FRAMEWORK_PYTORCH - number_of_stars: 11589 + number_of_stars: 11591 description: "PyTorch image models, scripts, pretrained weights -- ResNet, ResNeXT, EfficientNet, EfficientNetV2, NFNet, Vision Transformer, MixNet, MobileNet-V3/V2, RegNet, DPN, CSPNet, and more" } repositories: { @@ -16710,7 +16710,7 @@ pr_id_to_video: { url: "https://github.com/rwightman/pytorch-image-models" owner: "rwightman" framework: FRAMEWORK_PYTORCH - number_of_stars: 11589 + number_of_stars: 11591 description: "PyTorch image models, scripts, pretrained weights -- ResNet, ResNeXT, EfficientNet, EfficientNetV2, NFNet, Vision Transformer, MixNet, MobileNet-V3/V2, RegNet, DPN, CSPNet, and more" } repositories: { @@ -22865,7 +22865,7 @@ pr_id_to_video: { url: "https://github.com/lucidrains/DALLE-pytorch" owner: "lucidrains" framework: FRAMEWORK_PYTORCH - number_of_stars: 3150 + number_of_stars: 3154 description: "Implementation / 
replication of DALL-E, OpenAI's Text to Image Transformer, in Pytorch" } repositories: { @@ -23908,7 +23908,7 @@ pr_id_to_video: { url: "https://github.com/rwightman/pytorch-image-models" owner: "rwightman" framework: FRAMEWORK_PYTORCH - number_of_stars: 11589 + number_of_stars: 11591 description: "PyTorch image models, scripts, pretrained weights -- ResNet, ResNeXT, EfficientNet, EfficientNetV2, NFNet, Vision Transformer, MixNet, MobileNet-V3/V2, RegNet, DPN, CSPNet, and more" } repositories: { @@ -26218,7 +26218,7 @@ pr_id_to_video: { url: "https://github.com/tianweiy/CenterPoint-KITTI" owner: "tianweiy" framework: FRAMEWORK_PYTORCH - number_of_stars: 53 + number_of_stars: 54 } repositories: { url: "https://github.com/darrenjkt/CenterPoint" @@ -26501,7 +26501,7 @@ pr_id_to_video: { url: "https://github.com/open-mmlab/mmdetection" owner: "open-mmlab" framework: FRAMEWORK_PYTORCH - number_of_stars: 15627 + number_of_stars: 15628 description: "OpenMMLab Detection Toolbox and Benchmark" } repositories: { @@ -28841,7 +28841,7 @@ pr_id_to_video: { url: "https://github.com/rwightman/pytorch-image-models" owner: "rwightman" framework: FRAMEWORK_PYTORCH - number_of_stars: 11589 + number_of_stars: 11591 description: "PyTorch image models, scripts, pretrained weights -- ResNet, ResNeXT, EfficientNet, EfficientNetV2, NFNet, Vision Transformer, MixNet, MobileNet-V3/V2, RegNet, DPN, CSPNet, and more" } repositories: { @@ -29482,7 +29482,7 @@ pr_id_to_video: { url: "https://github.com/rwightman/pytorch-image-models" owner: "rwightman" framework: FRAMEWORK_PYTORCH - number_of_stars: 11589 + number_of_stars: 11591 description: "PyTorch image models, scripts, pretrained weights -- ResNet, ResNeXT, EfficientNet, EfficientNetV2, NFNet, Vision Transformer, MixNet, MobileNet-V3/V2, RegNet, DPN, CSPNet, and more" } repositories: { @@ -29508,7 +29508,7 @@ pr_id_to_video: { url: "https://github.com/open-mmlab/mmdetection" owner: 
"open-mmlab" framework: FRAMEWORK_PYTORCH - number_of_stars: 15627 + number_of_stars: 15628 description: "OpenMMLab Detection Toolbox and Benchmark" } repositories: { @@ -29586,7 +29586,7 @@ pr_id_to_video: { url: "https://github.com/xmu-xiaoma666/External-Attention-pytorch" owner: "xmu-xiaoma666" framework: FRAMEWORK_PYTORCH - number_of_stars: 835 + number_of_stars: 840 description: "🍀 Pytorch implementation of various Attention Mechanisms, MLP, Re-parameter, Convolution, which is helpful to further understand papers.⭐⭐⭐" } repositories: { @@ -33228,7 +33228,7 @@ pr_id_to_video: { url: "https://github.com/rwightman/pytorch-image-models" owner: "rwightman" framework: FRAMEWORK_PYTORCH - number_of_stars: 11589 + number_of_stars: 11591 description: "PyTorch image models, scripts, pretrained weights -- ResNet, ResNeXT, EfficientNet, EfficientNetV2, NFNet, Vision Transformer, MixNet, MobileNet-V3/V2, RegNet, DPN, CSPNet, and more" } repositories: { @@ -33660,7 +33660,7 @@ pr_id_to_video: { url: "https://github.com/PaddlePaddle/PaddleSeg" owner: "PaddlePaddle" framework: FRAMEWORK_OTHERS - number_of_stars: 1761 + number_of_stars: 1763 description: "End-to-end image segmentation kit based on PaddlePaddle. 
" } repositories: { @@ -36704,7 +36704,7 @@ pr_id_to_video: { url: "https://github.com/open-mmlab/mmdetection" owner: "open-mmlab" framework: FRAMEWORK_PYTORCH - number_of_stars: 15627 + number_of_stars: 15628 description: "OpenMMLab Detection Toolbox and Benchmark" } repositories: { @@ -37179,7 +37179,7 @@ pr_id_to_video: { url: "https://github.com/google-research/vision_transformer" owner: "google-research" framework: FRAMEWORK_OTHERS - number_of_stars: 3099 + number_of_stars: 3100 } repositories: { url: "https://github.com/wangguanan/light-reid" @@ -37220,7 +37220,7 @@ pr_id_to_video: { url: "https://github.com/rwightman/pytorch-image-models" owner: "rwightman" framework: FRAMEWORK_PYTORCH - number_of_stars: 11589 + number_of_stars: 11591 description: "PyTorch image models, scripts, pretrained weights -- ResNet, ResNeXT, EfficientNet, EfficientNetV2, NFNet, Vision Transformer, MixNet, MobileNet-V3/V2, RegNet, DPN, CSPNet, and more" } repositories: { @@ -39304,7 +39304,7 @@ pr_id_to_video: { url: "https://github.com/open-mmlab/mmdetection" owner: "open-mmlab" framework: FRAMEWORK_PYTORCH - number_of_stars: 15627 + number_of_stars: 15628 description: "OpenMMLab Detection Toolbox and Benchmark" } repositories: { @@ -39425,7 +39425,7 @@ pr_id_to_video: { url: "https://github.com/open-mmlab/mmdetection" owner: "open-mmlab" framework: FRAMEWORK_PYTORCH - number_of_stars: 15627 + number_of_stars: 15628 description: "OpenMMLab Detection Toolbox and Benchmark" } methods: { @@ -40524,7 +40524,7 @@ pr_id_to_video: { url: "https://github.com/microsoft/unilm" owner: "microsoft" framework: FRAMEWORK_PYTORCH - number_of_stars: 2310 + number_of_stars: 2311 description: "UniLM AI - Unified \"Language\" Model Pre-training across Tasks, Languages, and Modalities" } repositories: { @@ -41324,7 +41324,7 @@ pr_id_to_video: { url: "https://github.com/xmu-xiaoma666/External-Attention-pytorch" owner: "xmu-xiaoma666" 
framework: FRAMEWORK_PYTORCH - number_of_stars: 835 + number_of_stars: 840 description: "🍀 Pytorch implementation of various Attention Mechanisms, MLP, Re-parameter, Convolution, which is helpful to further understand papers.⭐⭐⭐" } repositories: { @@ -41339,7 +41339,7 @@ pr_id_to_video: { url: "https://github.com/rwightman/pytorch-image-models" owner: "rwightman" framework: FRAMEWORK_PYTORCH - number_of_stars: 11589 + number_of_stars: 11591 description: "PyTorch image models, scripts, pretrained weights -- ResNet, ResNeXT, EfficientNet, EfficientNetV2, NFNet, Vision Transformer, MixNet, MobileNet-V3/V2, RegNet, DPN, CSPNet, and more" } repositories: { @@ -41410,7 +41410,7 @@ pr_id_to_video: { url: "https://github.com/rwightman/pytorch-image-models" owner: "rwightman" framework: FRAMEWORK_PYTORCH - number_of_stars: 11589 + number_of_stars: 11591 description: "PyTorch image models, scripts, pretrained weights -- ResNet, ResNeXT, EfficientNet, EfficientNetV2, NFNet, Vision Transformer, MixNet, MobileNet-V3/V2, RegNet, DPN, CSPNet, and more" } repositories: { @@ -41480,7 +41480,7 @@ pr_id_to_video: { url: "https://github.com/rwightman/pytorch-image-models" owner: "rwightman" framework: FRAMEWORK_PYTORCH - number_of_stars: 11589 + number_of_stars: 11591 description: "PyTorch image models, scripts, pretrained weights -- ResNet, ResNeXT, EfficientNet, EfficientNetV2, NFNet, Vision Transformer, MixNet, MobileNet-V3/V2, RegNet, DPN, CSPNet, and more" } repositories: { @@ -41744,7 +41744,7 @@ pr_id_to_video: { url: "https://github.com/rwightman/pytorch-image-models" owner: "rwightman" framework: FRAMEWORK_PYTORCH - number_of_stars: 11589 + number_of_stars: 11591 description: "PyTorch image models, scripts, pretrained weights -- ResNet, ResNeXT, EfficientNet, EfficientNetV2, NFNet, Vision Transformer, MixNet, MobileNet-V3/V2, RegNet, DPN, CSPNet, and more" } repositories: { @@ -42878,7 +42878,7 @@ pr_id_to_video: { url: 
"https://github.com/lucidrains/DALLE-pytorch" owner: "lucidrains" framework: FRAMEWORK_PYTORCH - number_of_stars: 3150 + number_of_stars: 3154 description: "Implementation / replication of DALL-E, OpenAI's Text to Image Transformer, in Pytorch" } repositories: { @@ -43780,7 +43780,7 @@ pr_id_to_video: { url: "https://github.com/open-mmlab/mmdetection" owner: "open-mmlab" framework: FRAMEWORK_PYTORCH - number_of_stars: 15627 + number_of_stars: 15628 description: "OpenMMLab Detection Toolbox and Benchmark" } repositories: { @@ -46949,7 +46949,7 @@ pr_id_to_video: { url: "https://github.com/rwightman/pytorch-image-models" owner: "rwightman" framework: FRAMEWORK_PYTORCH - number_of_stars: 11589 + number_of_stars: 11591 description: "PyTorch image models, scripts, pretrained weights -- ResNet, ResNeXT, EfficientNet, EfficientNetV2, NFNet, Vision Transformer, MixNet, MobileNet-V3/V2, RegNet, DPN, CSPNet, and more" } repositories: { @@ -47009,7 +47009,7 @@ pr_id_to_video: { url: "https://github.com/google-research/vision_transformer" owner: "google-research" framework: FRAMEWORK_OTHERS - number_of_stars: 3099 + number_of_stars: 3100 } } video: { @@ -47594,7 +47594,7 @@ pr_id_to_video: { url: "https://github.com/rwightman/pytorch-image-models" owner: "rwightman" framework: FRAMEWORK_PYTORCH - number_of_stars: 11589 + number_of_stars: 11591 description: "PyTorch image models, scripts, pretrained weights -- ResNet, ResNeXT, EfficientNet, EfficientNetV2, NFNet, Vision Transformer, MixNet, MobileNet-V3/V2, RegNet, DPN, CSPNet, and more" } repositories: { @@ -47608,7 +47608,7 @@ pr_id_to_video: { url: "https://github.com/google-research/vision_transformer" owner: "google-research" framework: FRAMEWORK_OTHERS - number_of_stars: 3099 + number_of_stars: 3100 } repositories: { url: "https://github.com/labmlai/annotated_deep_learning_paper_implementations/tree/master/labml_nn/transformers/mlp_mixer" @@ -47793,7 +47793,7 @@ 
pr_id_to_video: { url: "https://github.com/xmu-xiaoma666/External-Attention-pytorch" owner: "xmu-xiaoma666" framework: FRAMEWORK_PYTORCH - number_of_stars: 835 + number_of_stars: 840 description: "🍀 Pytorch implementation of various Attention Mechanisms, MLP, Re-parameter, Convolution, which is helpful to further understand papers.⭐⭐⭐" } repositories: { @@ -47820,7 +47820,7 @@ pr_id_to_video: { url: "https://github.com/rwightman/pytorch-image-models" owner: "rwightman" framework: FRAMEWORK_PYTORCH - number_of_stars: 11589 + number_of_stars: 11591 description: "PyTorch image models, scripts, pretrained weights -- ResNet, ResNeXT, EfficientNet, EfficientNetV2, NFNet, Vision Transformer, MixNet, MobileNet-V3/V2, RegNet, DPN, CSPNet, and more" } repositories: { @@ -47925,7 +47925,7 @@ pr_id_to_video: { url: "https://github.com/google-research/vision_transformer" owner: "google-research" framework: FRAMEWORK_OTHERS - number_of_stars: 3099 + number_of_stars: 3100 } } papers: { @@ -48031,7 +48031,7 @@ pr_id_to_video: { url: "https://github.com/facebookresearch/dino" owner: "facebookresearch" framework: FRAMEWORK_PYTORCH - number_of_stars: 2547 + number_of_stars: 2548 description: "PyTorch code for Vision Transformers training with the Self-Supervised learning method DINO" } methods: { @@ -48852,7 +48852,7 @@ pr_id_to_video: { url: "https://github.com/rwightman/pytorch-image-models" owner: "rwightman" framework: FRAMEWORK_PYTORCH - number_of_stars: 11589 + number_of_stars: 11591 description: "PyTorch image models, scripts, pretrained weights -- ResNet, ResNeXT, EfficientNet, EfficientNetV2, NFNet, Vision Transformer, MixNet, MobileNet-V3/V2, RegNet, DPN, CSPNet, and more" } repositories: { @@ -48866,7 +48866,7 @@ pr_id_to_video: { url: "https://github.com/google-research/vision_transformer" owner: "google-research" framework: FRAMEWORK_OTHERS - number_of_stars: 3099 + number_of_stars: 3100 } repositories: { url: 
"https://github.com/labmlai/annotated_deep_learning_paper_implementations/tree/master/labml_nn/transformers/mlp_mixer" @@ -49057,7 +49057,7 @@ pr_id_to_video: { url: "https://github.com/google-research/vision_transformer" owner: "google-research" framework: FRAMEWORK_OTHERS - number_of_stars: 3099 + number_of_stars: 3100 } repositories: { url: "https://github.com/wangguanan/light-reid" @@ -49098,7 +49098,7 @@ pr_id_to_video: { url: "https://github.com/rwightman/pytorch-image-models" owner: "rwightman" framework: FRAMEWORK_PYTORCH - number_of_stars: 11589 + number_of_stars: 11591 description: "PyTorch image models, scripts, pretrained weights -- ResNet, ResNeXT, EfficientNet, EfficientNetV2, NFNet, Vision Transformer, MixNet, MobileNet-V3/V2, RegNet, DPN, CSPNet, and more" } repositories: { @@ -50388,7 +50388,7 @@ pr_id_to_video: { url: "https://github.com/rwightman/pytorch-image-models" owner: "rwightman" framework: FRAMEWORK_PYTORCH - number_of_stars: 11589 + number_of_stars: 11591 description: "PyTorch image models, scripts, pretrained weights -- ResNet, ResNeXT, EfficientNet, EfficientNetV2, NFNet, Vision Transformer, MixNet, MobileNet-V3/V2, RegNet, DPN, CSPNet, and more" } repositories: { @@ -50402,7 +50402,7 @@ pr_id_to_video: { url: "https://github.com/google-research/vision_transformer" owner: "google-research" framework: FRAMEWORK_OTHERS - number_of_stars: 3099 + number_of_stars: 3100 } repositories: { url: "https://github.com/labmlai/annotated_deep_learning_paper_implementations/tree/master/labml_nn/transformers/mlp_mixer" @@ -50538,14 +50538,14 @@ pr_id_to_video: { url: "https://github.com/google-research/vision_transformer" owner: "google-research" framework: FRAMEWORK_OTHERS - number_of_stars: 3099 + number_of_stars: 3100 } repositories: { is_official: true url: "https://github.com/rwightman/pytorch-image-models" owner: "rwightman" framework: FRAMEWORK_PYTORCH - number_of_stars: 
11589 + number_of_stars: 11591 description: "PyTorch image models, scripts, pretrained weights -- ResNet, ResNeXT, EfficientNet, EfficientNetV2, NFNet, Vision Transformer, MixNet, MobileNet-V3/V2, RegNet, DPN, CSPNet, and more" } } @@ -50575,14 +50575,14 @@ pr_id_to_video: { url: "https://github.com/google-research/vision_transformer" owner: "google-research" framework: FRAMEWORK_OTHERS - number_of_stars: 3099 + number_of_stars: 3100 } repositories: { is_official: true url: "https://github.com/rwightman/pytorch-image-models" owner: "rwightman" framework: FRAMEWORK_PYTORCH - number_of_stars: 11589 + number_of_stars: 11591 description: "PyTorch image models, scripts, pretrained weights -- ResNet, ResNeXT, EfficientNet, EfficientNetV2, NFNet, Vision Transformer, MixNet, MobileNet-V3/V2, RegNet, DPN, CSPNet, and more" } } @@ -50702,7 +50702,7 @@ pr_id_to_video: { video_id: "A3RrAIx-KCc" video_title: "PR-330: How To Train Your ViT? Data, Augmentation, and Regularization in Vision Transformers" number_of_likes: 20 - number_of_views: 404 + number_of_views: 405 published_date: { seconds: 1626015401 } @@ -50710,3 +50710,134 @@ pr_id_to_video: { } } } +pr_id_to_video: { + key: 331 + value: { + pr_id: 331 + papers: { + paper_id: "revisiting-the-calibration-of-modern-neural" + title: "Revisiting the Calibration of Modern Neural Networks" + arxiv_id: "2106.07998" + abstract: "Accurate estimation of predictive uncertainty (model calibration) is essential for the safe application of neural networks. Many instances of miscalibration in modern neural networks have been reported, suggesting a trend that newer, more accurate models produce poorly calibrated predictions. Here, we revisit this question for recent state-of-the-art image classification models. We systematically relate model calibration and accuracy, and find that the most recent models, notably those not using convolutions, are among the best calibrated. 
Trends observed in prior model generations, such as decay of calibration with distribution shift or model size, are less pronounced in recent architectures. We also show that model size and amount of pretraining do not fully explain these differences, suggesting that architecture is a major determinant of calibration properties." + published_date: { + seconds: 1623715200 + } + authors: "Matthias Minderer" + authors: "Josip Djolonga" + authors: "Rob Romijnders" + authors: "Frances Hubis" + authors: "Xiaohua Zhai" + authors: "Neil Houlsby" + authors: "Dustin Tran" + authors: "Mario Lucic" + } + papers: { + paper_id: "on-calibration-of-modern-neural-networks" + title: "On Calibration of Modern Neural Networks" + arxiv_id: "1706.04599" + abstract: "Confidence calibration -- the problem of predicting probability estimates representative of the true correctness likelihood -- is important for classification models in many applications. We discover that modern neural networks, unlike those from a decade ago, are poorly calibrated. Through extensive experiments, we observe that depth, width, weight decay, and Batch Normalization are important factors influencing calibration. We evaluate the performance of various post-processing calibration methods on state-of-the-art architectures with image and document classification datasets. Our analysis and experiments not only offer insights into neural network learning, but also provide a simple and straightforward recipe for practical settings: on most datasets, temperature scaling -- a single-parameter variant of Platt Scaling -- is surprisingly effective at calibrating predictions." + published_date: { + seconds: 1497398400 + } + authors: "Chuan Guo" + authors: "Geoff Pleiss" + authors: "Yu Sun" + authors: "Kilian Q. 
Weinberger" + repositories: { + url: "https://github.com/hollance/reliability-diagrams" + owner: "hollance" + framework: FRAMEWORK_PYTORCH + number_of_stars: 30 + description: "Reliability diagrams visualize whether a classifier model needs calibration" + } + repositories: { + url: "https://github.com/sleep3r/garrus" + owner: "sleep3r" + framework: FRAMEWORK_OTHERS + number_of_stars: 13 + description: "Python framework for high quality confidence estimation of deep neural networks, providing methods such as confidence calibration and ordinal ranking" + } + repositories: { + url: "https://github.com/bayesgroup/pytorch-ensembles" + owner: "bayesgroup" + framework: FRAMEWORK_PYTORCH + number_of_stars: 142 + description: "Pitfalls of In-Domain Uncertainty Estimation and Ensembling in Deep Learning, ICLR 2020" + } + repositories: { + url: "https://github.com/artnitolog/diary" + owner: "artnitolog" + framework: FRAMEWORK_OTHERS + description: "Accompanying repository for the 3rd year corsework. CMC MSU, MMF, 2020-2021." + } + repositories: { + url: "https://github.com/johntd54/stanford_car" + owner: "johntd54" + framework: FRAMEWORK_PYTORCH + number_of_stars: 5 + description: "Classification model for fine-grained visual classification on the Stanford Car dataset." + } + repositories: { + is_official: true + url: "https://github.com/gpleiss/temperature_scaling" + owner: "gpleiss" + framework: FRAMEWORK_PYTORCH + number_of_stars: 565 + description: "A simple way to calibrate your neural network." + } + repositories: { + url: "https://github.com/AnanyaKumar/verified_calibration" + owner: "AnanyaKumar" + framework: FRAMEWORK_TENSORFLOW + number_of_stars: 59 + description: "Calibration library and code for the paper: Verified Uncertainty Calibration. Ananya Kumar, Percy Liang, Tengyu Ma. NeurIPS 2019 (Spotlight)." 
+ } + repositories: { + url: "https://github.com/Andreas12321/Est-Cert-Final" + owner: "Andreas12321" + framework: FRAMEWORK_TENSORFLOW + } + repositories: { + url: "https://github.com/Jonathan-Pearce/calibration_library" + owner: "Jonathan-Pearce" + framework: FRAMEWORK_PYTORCH + number_of_stars: 7 + description: "Pytorch library for model calibration metrics and visualizations as well as recalibration methods. In progress!" + } + repositories: { + url: "https://github.com/Eric-Wallace/deep-knn" + owner: "Eric-Wallace" + framework: FRAMEWORK_PYTORCH + number_of_stars: 32 + description: "Code for the 2018 EMNLP Interpretability Workshop Paper \"Interpreting Neural Networks with Nearest Neighbors\"" + } + } + papers: { + paper_id: "revisiting-the-calibration-of-modern-neural" + title: "Revisiting the Calibration of Modern Neural Networks" + arxiv_id: "2106.07998" + abstract: "Accurate estimation of predictive uncertainty (model calibration) is essential for the safe application of neural networks. Many instances of miscalibration in modern neural networks have been reported, suggesting a trend that newer, more accurate models produce poorly calibrated predictions. Here, we revisit this question for recent state-of-the-art image classification models. We systematically relate model calibration and accuracy, and find that the most recent models, notably those not using convolutions, are among the best calibrated. Trends observed in prior model generations, such as decay of calibration with distribution shift or model size, are less pronounced in recent architectures. We also show that model size and amount of pretraining do not fully explain these differences, suggesting that architecture is a major determinant of calibration properties." 
+ published_date: { + seconds: 1623715200 + } + authors: "Matthias Minderer" + authors: "Josip Djolonga" + authors: "Rob Romijnders" + authors: "Frances Hubis" + authors: "Xiaohua Zhai" + authors: "Neil Houlsby" + authors: "Dustin Tran" + authors: "Mario Lucic" + } + video: { + video_id: "rI-vJuNKyIU" + video_title: "PR-331: Revisiting the Calibration of Modern Neural Networks" + number_of_likes: 7 + number_of_views: 161 + published_date: { + seconds: 1626015278 + } + uploader: "Sungchul Kim" + } + } +}