From df0b6a9f9f4ebd7d464178f55001481d00b62bab Mon Sep 17 00:00:00 2001 From: Ram81 Date: Sun, 20 Jun 2021 20:20:12 +0530 Subject: [PATCH] Add multi metric leaderboard --- ...d_enable_multi_metric_leaderboard_field.py | 18 +++++++++++++ apps/challenges/models.py | 2 ++ apps/challenges/serializers.py | 8 ++++++ apps/jobs/utils.py | 12 ++++++++- apps/jobs/views.py | 6 +++-- frontend/src/js/controllers/challengeCtrl.js | 27 ++++++++++++++----- frontend/src/js/route-config/route-config.js | 10 +++++++ .../src/views/web/challenge/leaderboard.html | 11 ++++++++ tests/unit/challenges/test_views.py | 19 +++++++++++++ tests/unit/participants/test_views.py | 2 ++ 10 files changed, 106 insertions(+), 9 deletions(-) create mode 100644 apps/challenges/migrations/0085_add_enable_multi_metric_leaderboard_field.py diff --git a/apps/challenges/migrations/0085_add_enable_multi_metric_leaderboard_field.py b/apps/challenges/migrations/0085_add_enable_multi_metric_leaderboard_field.py new file mode 100644 index 0000000000..fd46efe4e2 --- /dev/null +++ b/apps/challenges/migrations/0085_add_enable_multi_metric_leaderboard_field.py @@ -0,0 +1,18 @@ +# Generated by Django 2.2.13 on 2021-06-20 14:33 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ("challenges", "0084_challenge_is_static_dataset_code_upload"), + ] + + operations = [ + migrations.AddField( + model_name="challenge", + name="is_multi_metric_leaderboard", + field=models.BooleanField(default=False), + ), + ] diff --git a/apps/challenges/models.py b/apps/challenges/models.py index 9467f93195..81f97186cc 100644 --- a/apps/challenges/models.py +++ b/apps/challenges/models.py @@ -172,6 +172,8 @@ def __init__(self, *args, **kwargs): desired_worker_instance = models.IntegerField( null=True, blank=True, default=1 ) + # Allow ordering leaderboard by all metrics + is_multi_metric_leaderboard = models.BooleanField(default=False) class Meta: app_label = "challenges" diff --git 
a/apps/challenges/serializers.py b/apps/challenges/serializers.py index 5562f680d5..b9855a504b 100644 --- a/apps/challenges/serializers.py +++ b/apps/challenges/serializers.py @@ -63,6 +63,7 @@ class Meta: "cli_version", "remote_evaluation", "workers", + "is_multi_metric_leaderboard", ) @@ -141,6 +142,7 @@ class ChallengePhaseSplitSerializer(serializers.ModelSerializer): dataset_split_name = serializers.SerializerMethodField() challenge_phase_name = serializers.SerializerMethodField() + leaderboard_schema = serializers.SerializerMethodField() class Meta: model = ChallengePhaseSplit @@ -153,8 +155,13 @@ class Meta: "visibility", "show_leaderboard_by_latest_submission", "show_execution_time", + "leaderboard_schema", ) + def get_leaderboard_schema(self, obj): + # Expose the raw leaderboard schema so the UI can list sortable metrics + return obj.leaderboard.schema + def get_dataset_split_name(self, obj): return obj.dataset_split.name @@ -258,6 +265,7 @@ class Meta: "max_worker_instance", "min_worker_instance", "desired_worker_instance", + "is_multi_metric_leaderboard", ) diff --git a/apps/jobs/utils.py b/apps/jobs/utils.py index f31737e030..ba7916acfe 100644 --- a/apps/jobs/utils.py +++ b/apps/jobs/utils.py @@ -234,7 +234,7 @@ def handle_submission_rerun(submission, updated_status): def calculate_distinct_sorted_leaderboard_data( - user, challenge_obj, challenge_phase_split, only_public_entries + user, challenge_obj, challenge_phase_split, only_public_entries, order_by ): """ Function to calculate and return the sorted leaderboard data @@ -253,6 +253,7 @@ def calculate_distinct_sorted_leaderboard_data( leaderboard = challenge_phase_split.leaderboard # Get the default order by key to rank the entries on the leaderboard + default_order_by = None try: default_order_by = leaderboard.schema["default_order_by"] except KeyError: @@ -260,6 +261,15 @@ def calculate_distinct_sorted_leaderboard_data( response_data = { "error": "Sorry, default_order_by key is missing in leaderboard schema!" 
} return response_data, status.HTTP_400_BAD_REQUEST + # Use order by field from request only if it is a valid metric label + try: + if order_by is not None and order_by in leaderboard.schema["labels"]: + default_order_by = order_by + except KeyError: + response_data = { + "error": "Sorry, labels key is missing in leaderboard schema!" + } + return response_data, status.HTTP_400_BAD_REQUEST # Exclude the submissions done by members of the host team # while populating leaderboard diff --git a/apps/jobs/views.py b/apps/jobs/views.py index ac546cdb89..386d7bf87a 100644 --- a/apps/jobs/views.py +++ b/apps/jobs/views.py @@ -504,7 +504,7 @@ def change_submission_data_and_visibility( @swagger_auto_schema( - methods=["get"], + methods=["get", "post"], manual_parameters=[ openapi.Parameter( name="challenge_phase_split_id", @@ -574,7 +574,7 @@ def change_submission_data_and_visibility( ) }, ) -@api_view(["GET"]) +@api_view(["GET", "POST"]) @throttle_classes([AnonRateThrottle]) def leaderboard(request, challenge_phase_split_id): """ @@ -592,6 +592,7 @@ def leaderboard(request, challenge_phase_split_id): challenge_phase_split_id ) challenge_obj = challenge_phase_split.challenge_phase.challenge + order_by = request.data.get("order_by") ( response_data, http_status_code, @@ -600,6 +601,7 @@ def leaderboard(request, challenge_phase_split_id): challenge_obj, challenge_phase_split, only_public_entries=True, + order_by=order_by, ) # The response 400 will be returned if the leaderboard isn't public or `default_order_by` key is missing in leaderboard. 
if http_status_code == status.HTTP_400_BAD_REQUEST: diff --git a/frontend/src/js/controllers/challengeCtrl.js b/frontend/src/js/controllers/challengeCtrl.js index 361aa5cc5f..f396858187 100644 --- a/frontend/src/js/controllers/challengeCtrl.js +++ b/frontend/src/js/controllers/challengeCtrl.js @@ -24,12 +24,15 @@ vm.projectUrl = ""; vm.publicationUrl = ""; vm.isPublicSubmission = null; + vm.isMultiMetricLeaderboardEnabled = false; vm.wrnMsg = {}; vm.page = {}; vm.isParticipated = false; vm.isActive = false; vm.phases = {}; vm.phaseSplits = {}; + vm.orderLeaderboardBy = $stateParams.metric ? decodeURIComponent($stateParams.metric) : null; + vm.phaseSplitLeaderboardSchema = {}; vm.submissionMetaAttributes = []; // Stores the attributes format and phase ID for all the phases of a challenge. vm.metaAttributesforCurrentSubmission = null; // Stores the attributes while making a submission for a selected phase. vm.selectedPhaseSplit = {}; @@ -309,6 +312,7 @@ vm.isRegistrationOpen = details.is_registration_open; vm.approved_by_admin = details.approved_by_admin; vm.isRemoteChallenge = details.remote_evaluation; + vm.isMultiMetricLeaderboardEnabled = details.is_multi_metric_leaderboard; vm.getTeamName(vm.challengeId); if (vm.page.image === null) { @@ -825,6 +829,7 @@ vm.phaseSplits[i].showPrivate = true; vm.showPrivateIds.push(vm.phaseSplits[i].id); } + vm.phaseSplitLeaderboardSchema[vm.phaseSplits[i].id] = vm.phaseSplits[i].leaderboard_schema; } utilities.hideLoader(); }, @@ -879,8 +884,10 @@ vm.stopLeaderboard(); vm.poller = $interval(function() { parameters.url = "jobs/" + "challenge_phase_split/" + vm.phaseSplitId + "/leaderboard/?page_size=1000"; - parameters.method = 'GET'; - parameters.data = {}; + parameters.method = 'POST'; + parameters.data = { + "order_by": vm.orderLeaderboardBy + }; parameters.callback = { onSuccess: function(response) { var details = response.data; @@ -937,8 +944,10 @@ // Show leaderboard vm.leaderboard = {}; parameters.url = "jobs/" + "challenge_phase_split/" + 
vm.phaseSplitId + "/leaderboard/?page_size=1000"; - parameters.method = 'GET'; - parameters.data = {}; + parameters.method = 'POST'; + parameters.data = { + "order_by": vm.orderLeaderboardBy + }; parameters.callback = { onSuccess: function(response) { var details = response.data; @@ -1356,8 +1365,10 @@ vm.startLoader("Loading Leaderboard Items"); vm.leaderboard = {}; parameters.url = "jobs/" + "challenge_phase_split/" + vm.phaseSplitId + "/leaderboard/?page_size=1000"; - parameters.method = 'GET'; - parameters.data = {}; + parameters.method = 'POST'; + parameters.data = { + "order_by": vm.orderLeaderboardBy + }; parameters.callback = { onSuccess: function(response) { var details = response.data; @@ -2634,6 +2645,10 @@ } }; + vm.encodeMetricURI = function(metric) { + return encodeURIComponent(metric); + } + } })(); diff --git a/frontend/src/js/route-config/route-config.js b/frontend/src/js/route-config/route-config.js index abddc5432a..acbcebb256 100644 --- a/frontend/src/js/route-config/route-config.js +++ b/frontend/src/js/route-config/route-config.js @@ -286,6 +286,15 @@ title: 'Leaderboard' }; + var challenge_phase_metric_leaderboard = { + name: "web.challenge-main.challenge-page.phase-metric-leaderboard", + url: "/leaderboard/:phaseSplitId/:metric", + controller: 'ChallengeCtrl', + controllerAs: 'challenge', + templateUrl: baseUrl + "/web/challenge/leaderboard.html", + title: 'Leaderboard' + }; + var profile = { name: "web.profile", parent: "web", @@ -503,6 +512,7 @@ $stateProvider.state(my_challenge_all_submission); $stateProvider.state(leaderboard); $stateProvider.state(challenge_phase_leaderboard); + $stateProvider.state(challenge_phase_metric_leaderboard); // featured challenge details $stateProvider.state(featured_challenge_page); diff --git a/frontend/src/views/web/challenge/leaderboard.html b/frontend/src/views/web/challenge/leaderboard.html index d37837c3d1..f57a12362c 100644 --- a/frontend/src/views/web/challenge/leaderboard.html +++ 
b/frontend/src/views/web/challenge/leaderboard.html @@ -40,6 +40,17 @@
Leaderboard
+
+
+ + + + {{key}} + + +
+
diff --git a/tests/unit/challenges/test_views.py b/tests/unit/challenges/test_views.py index 2ccfa4762e..639aeef925 100644 --- a/tests/unit/challenges/test_views.py +++ b/tests/unit/challenges/test_views.py @@ -173,6 +173,7 @@ def test_get_challenge(self): "cli_version": self.challenge.cli_version, "remote_evaluation": self.challenge.remote_evaluation, "workers": self.challenge.workers, + "is_multi_metric_leaderboard": self.challenge.is_multi_metric_leaderboard, } ] @@ -321,6 +322,7 @@ def test_get_particular_challenge(self): "cli_version": self.challenge.cli_version, "remote_evaluation": self.challenge.remote_evaluation, "workers": self.challenge.workers, + "is_multi_metric_leaderboard": self.challenge.is_multi_metric_leaderboard, } response = self.client.get(self.url, {}) self.assertEqual(response.data, expected) @@ -396,6 +398,7 @@ def test_update_challenge_when_user_is_its_creator(self): "cli_version": self.challenge.cli_version, "remote_evaluation": self.challenge.remote_evaluation, "workers": self.challenge.workers, + "is_multi_metric_leaderboard": self.challenge.is_multi_metric_leaderboard, } response = self.client.put( self.url, {"title": new_title, "description": new_description} @@ -497,6 +500,7 @@ def test_particular_challenge_partial_update(self): "cli_version": self.challenge.cli_version, "remote_evaluation": self.challenge.remote_evaluation, "workers": self.challenge.workers, + "is_multi_metric_leaderboard": self.challenge.is_multi_metric_leaderboard, } response = self.client.patch(self.url, self.partial_update_data) self.assertEqual(response.data, expected) @@ -547,6 +551,7 @@ def test_particular_challenge_update(self): "cli_version": self.challenge.cli_version, "remote_evaluation": self.challenge.remote_evaluation, "workers": self.challenge.workers, + "is_multi_metric_leaderboard": self.challenge.is_multi_metric_leaderboard, } response = self.client.put(self.url, self.data) self.assertEqual(response.data, expected) @@ -1058,6 +1063,7 @@ def 
test_get_past_challenges(self): "cli_version": self.challenge3.cli_version, "remote_evaluation": self.challenge3.remote_evaluation, "workers": self.challenge3.workers, + "is_multi_metric_leaderboard": self.challenge3.is_multi_metric_leaderboard, } ] response = self.client.get(self.url, {}, format="json") @@ -1110,6 +1116,7 @@ def test_get_present_challenges(self): "cli_version": self.challenge2.cli_version, "remote_evaluation": self.challenge2.remote_evaluation, "workers": self.challenge2.workers, + "is_multi_metric_leaderboard": self.challenge2.is_multi_metric_leaderboard, } ] response = self.client.get(self.url, {}, format="json") @@ -1162,6 +1169,7 @@ def test_get_future_challenges(self): "cli_version": self.challenge4.cli_version, "remote_evaluation": self.challenge4.remote_evaluation, "workers": self.challenge4.workers, + "is_multi_metric_leaderboard": self.challenge4.is_multi_metric_leaderboard, } ] response = self.client.get(self.url, {}, format="json") @@ -1213,6 +1221,7 @@ def test_get_all_challenges(self): "cli_version": self.challenge4.cli_version, "remote_evaluation": self.challenge4.remote_evaluation, "workers": self.challenge4.workers, + "is_multi_metric_leaderboard": self.challenge4.is_multi_metric_leaderboard, }, { "id": self.challenge3.pk, @@ -1253,6 +1262,7 @@ def test_get_all_challenges(self): "cli_version": self.challenge3.cli_version, "remote_evaluation": self.challenge3.remote_evaluation, "workers": self.challenge3.workers, + "is_multi_metric_leaderboard": self.challenge3.is_multi_metric_leaderboard, }, { "id": self.challenge2.pk, @@ -1293,6 +1303,7 @@ def test_get_all_challenges(self): "cli_version": self.challenge2.cli_version, "remote_evaluation": self.challenge2.remote_evaluation, "workers": self.challenge2.workers, + "is_multi_metric_leaderboard": self.challenge2.is_multi_metric_leaderboard, }, ] response = self.client.get(self.url, {}, format="json") @@ -1394,6 +1405,7 @@ def test_get_featured_challenges(self): "cli_version": 
self.challenge3.cli_version, "remote_evaluation": self.challenge3.remote_evaluation, "workers": self.challenge3.workers, + "is_multi_metric_leaderboard": self.challenge3.is_multi_metric_leaderboard, } ] response = self.client.get(self.url, {}, format="json") @@ -1522,6 +1534,7 @@ def test_get_challenge_by_pk_when_user_is_challenge_host(self): "cli_version": self.challenge3.cli_version, "remote_evaluation": self.challenge3.remote_evaluation, "workers": self.challenge3.workers, + "is_multi_metric_leaderboard": self.challenge3.is_multi_metric_leaderboard, } response = self.client.get(self.url, {}) @@ -1586,6 +1599,7 @@ def test_get_challenge_by_pk_when_user_is_participant(self): "cli_version": self.challenge4.cli_version, "remote_evaluation": self.challenge4.remote_evaluation, "workers": self.challenge4.workers, + "is_multi_metric_leaderboard": self.challenge4.is_multi_metric_leaderboard, } self.client.force_authenticate(user=self.user1) @@ -1706,6 +1720,7 @@ def test_get_challenge_when_host_team_is_given(self): "cli_version": self.challenge2.cli_version, "remote_evaluation": self.challenge2.remote_evaluation, "workers": self.challenge2.workers, + "is_multi_metric_leaderboard": self.challenge2.is_multi_metric_leaderboard, } ] @@ -1758,6 +1773,7 @@ def test_get_challenge_when_participant_team_is_given(self): "cli_version": self.challenge2.cli_version, "remote_evaluation": self.challenge2.remote_evaluation, "workers": self.challenge2.workers, + "is_multi_metric_leaderboard": self.challenge2.is_multi_metric_leaderboard, } ] @@ -1810,6 +1826,7 @@ def test_get_challenge_when_mode_is_participant(self): "cli_version": self.challenge2.cli_version, "remote_evaluation": self.challenge2.remote_evaluation, "workers": self.challenge2.workers, + "is_multi_metric_leaderboard": self.challenge2.is_multi_metric_leaderboard, } ] @@ -1860,6 +1877,7 @@ def test_get_challenge_when_mode_is_host(self): "cli_version": self.challenge.cli_version, "remote_evaluation": 
self.challenge.remote_evaluation, "workers": self.challenge.workers, + "is_multi_metric_leaderboard": self.challenge.is_multi_metric_leaderboard, }, { "id": self.challenge2.pk, @@ -1900,6 +1918,7 @@ def test_get_challenge_when_mode_is_host(self): "cli_version": self.challenge2.cli_version, "remote_evaluation": self.challenge2.remote_evaluation, "workers": self.challenge2.workers, + "is_multi_metric_leaderboard": self.challenge2.is_multi_metric_leaderboard, }, ] diff --git a/tests/unit/participants/test_views.py b/tests/unit/participants/test_views.py index 33e201be26..12e0d60d94 100644 --- a/tests/unit/participants/test_views.py +++ b/tests/unit/participants/test_views.py @@ -814,6 +814,7 @@ def test_get_teams_and_corresponding_challenges_for_a_participant(self): "cli_version": self.challenge1.cli_version, "remote_evaluation": self.challenge1.remote_evaluation, "workers": self.challenge1.workers, + "is_multi_metric_leaderboard": self.challenge1.is_multi_metric_leaderboard, }, "participant_team": { "id": self.participant_team.id, @@ -881,6 +882,7 @@ def test_get_participant_team_challenge_list(self): "cli_version": self.challenge1.cli_version, "remote_evaluation": self.challenge1.remote_evaluation, "workers": self.challenge1.workers, + "is_multi_metric_leaderboard": self.challenge1.is_multi_metric_leaderboard, } ]