diff --git a/apps/challenges/migrations/0085_add_enable_multi_metric_leaderboard_field.py b/apps/challenges/migrations/0086_add_is_multi_metric_leaderboard_field.py
similarity index 51%
rename from apps/challenges/migrations/0085_add_enable_multi_metric_leaderboard_field.py
rename to apps/challenges/migrations/0086_add_is_multi_metric_leaderboard_field.py
index fd46efe4e2..61d0e6e228 100644
--- a/apps/challenges/migrations/0085_add_enable_multi_metric_leaderboard_field.py
+++ b/apps/challenges/migrations/0086_add_is_multi_metric_leaderboard_field.py
@@ -1,4 +1,4 @@
-# Generated by Django 2.2.13 on 2021-06-20 14:33
+# Generated by Django 2.2.20 on 2021-08-07 14:19
 
 from django.db import migrations, models
 
@@ -6,13 +6,13 @@ class Migration(migrations.Migration):
 
     dependencies = [
-        ("challenges", "0084_challenge_is_static_dataset_code_upload"),
+        ("challenges", "0085_challenge_submission_time_limit"),
     ]
 
     operations = [
         migrations.AddField(
-            model_name="challenge",
+            model_name="challengephasesplit",
            name="is_multi_metric_leaderboard",
-            field=models.BooleanField(default=False),
+            field=models.BooleanField(default=True),
         ),
     ]
diff --git a/apps/challenges/models.py b/apps/challenges/models.py
index 1fe124a710..3732aa7ec2 100644
--- a/apps/challenges/models.py
+++ b/apps/challenges/models.py
@@ -173,8 +173,6 @@ def __init__(self, *args, **kwargs):
     desired_worker_instance = models.IntegerField(
         null=True, blank=True, default=1
     )
-    # Allow ordering leaderboard by all metrics
-    is_multi_metric_leaderboard = models.BooleanField(default=False)
 
     class Meta:
         app_label = "challenges"
@@ -404,6 +402,8 @@ class ChallengePhaseSplit(TimeStampedModel):
     is_leaderboard_order_descending = models.BooleanField(default=True)
     show_leaderboard_by_latest_submission = models.BooleanField(default=False)
     show_execution_time = models.BooleanField(default=False)
+    # Allow ordering leaderboard by all metrics
+    is_multi_metric_leaderboard = models.BooleanField(default=True)
 
     def __str__(self):
         return "{0} : {1}".format(
diff --git a/apps/challenges/serializers.py b/apps/challenges/serializers.py
index 2d7d394ff0..d6047606d7 100644
--- a/apps/challenges/serializers.py
+++ b/apps/challenges/serializers.py
@@ -158,10 +158,10 @@ class Meta:
             "show_leaderboard_by_latest_submission",
             "show_execution_time",
             "leaderboard_schema",
+            "is_multi_metric_leaderboard",
         )
 
     def get_leaderboard_schema(self, obj):
-        print(obj.leaderboard.schema)
         return obj.leaderboard.schema
 
     def get_dataset_split_name(self, obj):
@@ -268,7 +268,6 @@ class Meta:
             "max_worker_instance",
             "min_worker_instance",
             "desired_worker_instance",
-            "is_multi_metric_leaderboard",
         )
diff --git a/apps/challenges/views.py b/apps/challenges/views.py
index 1397f6142b..89d670f4d8 100644
--- a/apps/challenges/views.py
+++ b/apps/challenges/views.py
@@ -1246,7 +1246,6 @@ def create_challenge_using_zip_file(request, challenge_host_team_pk):
                 challenge_phase_data[
                     field
                 ] = challenge_phase_data_from_hosts.get(field)
-
    try:
        with transaction.atomic():
            serializer = ZipChallengeSerializer(
diff --git a/apps/jobs/utils.py b/apps/jobs/utils.py
index d82e04a187..306a56deb8 100644
--- a/apps/jobs/utils.py
+++ b/apps/jobs/utils.py
@@ -254,6 +254,9 @@ def calculate_distinct_sorted_leaderboard_data(
 
     # Get the default order by key to rank the entries on the leaderboard
     default_order_by = None
+    is_leaderboard_order_descending = (
+        challenge_phase_split.is_leaderboard_order_descending
+    )
     try:
         default_order_by = leaderboard.schema["default_order_by"]
     except KeyError:
@@ -271,6 +274,19 @@ def calculate_distinct_sorted_leaderboard_data(
         }
         return response_data, status.HTTP_400_BAD_REQUEST
 
+    leaderboard_schema = leaderboard.schema
+    if (
+        leaderboard_schema.get("metadata") is not None
+        and leaderboard_schema.get("metadata").get(default_order_by)
+        is not None
+    ):
+        is_leaderboard_order_descending = (
+            leaderboard_schema["metadata"][default_order_by].get(
+                "sort_ascending"
+            )
+            is False
+        )
+
     # Exclude the submissions done by members of the host team
     # while populating leaderboard
     challenge_hosts_emails = (
@@ -408,9 +424,7 @@ def calculate_distinct_sorted_leaderboard_data(
                 float(k["filtering_score"]),
                 float(-k["filtering_error"]),
             ),
-            reverse=True
-            if challenge_phase_split.is_leaderboard_order_descending
-            else False,
+            reverse=True if is_leaderboard_order_descending else False,
         )
     distinct_sorted_leaderboard_data = []
     team_list = []
diff --git a/frontend/src/js/controllers/challengeCtrl.js b/frontend/src/js/controllers/challengeCtrl.js
index 0e49456b9d..85a4691bd0 100644
--- a/frontend/src/js/controllers/challengeCtrl.js
+++ b/frontend/src/js/controllers/challengeCtrl.js
@@ -24,7 +24,7 @@
         vm.projectUrl = "";
         vm.publicationUrl = "";
         vm.isPublicSubmission = null;
-        vm.isMultiMetricLeaderboardEnabled = false;
+        vm.isMultiMetricLeaderboardEnabled = {};
         vm.wrnMsg = {};
         vm.page = {};
         vm.isParticipated = false;
@@ -316,7 +316,6 @@
            vm.isRegistrationOpen = details.is_registration_open;
            vm.approved_by_admin = details.approved_by_admin;
            vm.isRemoteChallenge = details.remote_evaluation;
-            vm.isMultiMetricLeaderboardEnabled = details.is_multi_metric_leaderboard;
            vm.getTeamName(vm.challengeId);
 
            if (vm.page.image === null) {
@@ -833,6 +832,7 @@
                    vm.phaseSplits[i].showPrivate = true;
                    vm.showPrivateIds.push(vm.phaseSplits[i].id);
                }
+                vm.isMultiMetricLeaderboardEnabled[vm.phaseSplits[i].id] = vm.phaseSplits[i].is_multi_metric_leaderboard;
                vm.phaseSplitLeaderboardSchema[vm.phaseSplits[i].id] = vm.phaseSplits[i].leaderboard_schema;
            }
            utilities.hideLoader();
diff --git a/frontend/src/views/web/challenge/leaderboard.html b/frontend/src/views/web/challenge/leaderboard.html
index bd477757ca..8657a9d25f 100644
--- a/frontend/src/views/web/challenge/leaderboard.html
+++ b/frontend/src/views/web/challenge/leaderboard.html
@@ -40,7 +40,7 @@