Merge pull request #3422 from rebeccacremona/tweak-no-pageinfo
Tweak to "no PageInfo" check
rebeccacremona authored Oct 30, 2023
2 parents eecba3f + 88663db commit d8def7c
Showing 2 changed files with 40 additions and 16 deletions.
54 changes: 39 additions & 15 deletions perma_web/perma/admin.py
@@ -93,6 +93,16 @@ def queryset(self, request, queryset):
return queryset.filter(link__guid__icontains=value)


class ScoopJobIDFilter(InputFilter):
parameter_name = 'scoop_job_id'
title = 'Scoop Job ID'

def queryset(self, request, queryset):
value = self.value()
if value:
return queryset.filter(scoop_logs__id_capture=value)


class TagFilter(InputFilter):
parameter_name = 'tag'
title = 'tag'
@@ -102,6 +112,15 @@ def queryset(self, request, queryset):
if value:
return queryset.filter(tags__name__icontains=value)

class LinkTagFilter(InputFilter):
parameter_name = 'linktag'
title = 'link tag'

def queryset(self, request, queryset):
value = self.value()
if value:
return queryset.filter(link__tags__name__icontains=value)


class MessageFilter(InputFilter):
parameter_name = 'message'
@@ -568,13 +587,13 @@ class LinkAdmin(SimpleHistoryAdmin):
list_display = ['guid', 'submitted_url', 'created_by', 'creation_timestamp', 'tag_list', 'is_private', 'user_deleted', 'cached_can_play_back', 'captured_by_software', 'internet_archive_upload_status', 'file_size']
list_filter = [GUIDFilter, CreatedByFilter, SubmittedURLFilter, TagFilter, 'cached_can_play_back', 'captured_by_software', 'internet_archive_upload_status']
fieldsets = (
(None, {'fields': ('guid', 'submitted_url', 'submitted_url_surt','submitted_title', 'submitted_description', 'created_by', 'creation_timestamp', 'captured_by_software', 'captured_by_browser', 'file_size', 'replacement_link', 'tags')}),
(None, {'fields': ('guid', 'capture_job', 'submitted_url', 'submitted_url_surt','submitted_title', 'submitted_description', 'created_by', 'creation_timestamp', 'captured_by_software', 'captured_by_browser', 'file_size', 'replacement_link', 'tags')}),
('Visibility', {'fields': ('is_private', 'private_reason', 'is_unlisted',)}),
('User Delete', {'fields': ('user_deleted', 'user_deleted_timestamp',)}),
('Organization', {'fields': ('folders', 'notes')}),
('Mirroring', {'fields': ('archive_timestamp', 'internet_archive_upload_status', 'cached_can_play_back')}),
)
readonly_fields = ['guid', 'folders', 'creation_timestamp', 'file_size', 'captured_by_software', 'captured_by_browser', 'archive_timestamp']
readonly_fields = ['guid', 'capture_job', 'folders', 'creation_timestamp', 'file_size', 'captured_by_software', 'captured_by_browser', 'archive_timestamp']
inlines = [
new_class("CaptureInline", admin.TabularInline, model=Capture,
fields=['role', 'status', 'url', 'content_type', 'record_type', 'user_upload'],
@@ -633,8 +652,8 @@ def __init__(self, *args, **kwargs):


class CaptureJobAdmin(admin.ModelAdmin):
list_display = ['id', 'engine', 'status', 'superseded', 'message', 'created_by_id', 'link_id', 'human', 'submitted_url', 'capture_time', 'scoop_time', 'scoop_state']
list_filter = ['engine', CreatedByFilter, LinkIDFilter, 'status', MessageFilter, 'superseded', JobWithDeletedLinkFilter, 'scoop_state']
list_display = ['id', 'engine', 'status', 'superseded', 'message', 'created_by_id', 'link_id', 'human', 'submitted_url', 'scoop_state', 'scoop_job_id']
list_filter = ['engine', CreatedByFilter, LinkIDFilter, 'status', LinkTagFilter, MessageFilter, 'superseded', JobWithDeletedLinkFilter, 'scoop_state', ScoopJobIDFilter]
raw_id_fields = ['link', 'created_by', 'link_batch']

paginator = FasterAdminPaginator
@@ -649,20 +668,25 @@ def link_creation_timestamp(self, obj):
return obj.link.creation_timestamp
return None

def link_taglist(self, obj):
if obj.link:
return ", ".join(o.name for o in obj.link.tags.all())
def scoop_job_id(self, obj):
if obj.scoop_logs:
return obj.scoop_logs['id_capture']
return None

def capture_time(self, obj):
if obj.capture_start_time and obj.capture_end_time:
return obj.capture_end_time - obj.capture_start_time
return None
# def link_taglist(self, obj):
# if obj.link:
# return ", ".join(o.name for o in obj.link.tags.all())
# return None

def scoop_time(self, obj):
if obj.scoop_start_time and obj.scoop_end_time:
return obj.scoop_end_time - obj.scoop_start_time
return None
# def capture_time(self, obj):
# if obj.capture_start_time and obj.capture_end_time:
# return obj.capture_end_time - obj.capture_start_time
# return None

# def scoop_time(self, obj):
# if obj.scoop_start_time and obj.scoop_end_time:
# return obj.scoop_end_time - obj.scoop_start_time
# return None


class LinkBatchAdmin(admin.ModelAdmin):
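
Note: the new ScoopJobIDFilter and LinkTagFilter subclass InputFilter, which is defined earlier in admin.py and does not appear in this diff. For context, a minimal sketch of the usual Django recipe for such a base class follows; the template name and the choices() handling are assumptions for illustration, not taken from this commit.

    from django.contrib import admin

    class InputFilter(admin.SimpleListFilter):
        # Render the filter as a free-text input rather than a list of choices.
        template = 'admin/input_filter.html'  # assumed template name

        def lookups(self, request, model_admin):
            # Return a single dummy lookup so Django renders the filter at all;
            # the custom template replaces the choice list with a text box.
            return ((),)

        def choices(self, changelist):
            # Keep the other active querystring parameters when the input is submitted.
            all_choice = next(super().choices(changelist))
            all_choice['query_parts'] = (
                (k, v)
                for k, v in changelist.get_filters_params().items()
                if k != self.parameter_name
            )
            yield all_choice

Under that assumption, each subclass only needs to supply parameter_name, title, and a queryset() override, as ScoopJobIDFilter and LinkTagFilter do above.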
2 changes: 1 addition & 1 deletion perma_web/perma/celery_tasks.py
@@ -829,7 +829,7 @@ def save_scoop_capture(link, capture_job, data):
link.primary_capture.content_type = data['scoop_capture_summary']['targetUrlContentType']
link.primary_capture.save(update_fields=['content_type'])

if data['scoop_capture_summary'].get('pageInfo'):
if 'pageInfo' in data['scoop_capture_summary']:
title = data['scoop_capture_summary']['pageInfo'].get('title')
if title and link.submitted_title == link.get_default_title():
link.submitted_title = title[:2100]
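
The tweak above swaps a truthiness check for a membership check. A quick illustration of the difference, using hypothetical summary dicts rather than real Scoop output:

    summary = {'pageInfo': {}}           # key present but empty (hypothetical)
    bool(summary.get('pageInfo'))        # False -> old check skips the block
    'pageInfo' in summary                # True  -> new check enters it

    summary = {}                         # key missing entirely
    bool(summary.get('pageInfo'))        # False
    'pageInfo' in summary                # False -> both versions agree here

So the "no PageInfo" path is now taken only when the key is truly absent; a present-but-empty pageInfo dict goes through the normal branch, where .get('title') simply returns None.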