Skip to content

Commit

Permalink
Try not to fetch the first page again when requesting a channel con…
Browse files Browse the repository at this point in the history
…tinuation

Try not to fetch the first page of a YouTube channel again when requesting a continuation of it, by storing the channel name and the channel id in the next page, using the ids field of the Page class.
  • Loading branch information
AudricV committed Jul 31, 2021
1 parent d820053 commit e0011de
Showing 1 changed file with 17 additions and 15 deletions.
Original file line number Diff line number Diff line change
Expand Up @@ -14,7 +14,6 @@
import org.schabi.newpipe.extractor.exceptions.ExtractionException;
import org.schabi.newpipe.extractor.exceptions.ParsingException;
import org.schabi.newpipe.extractor.linkhandler.ListLinkHandler;
import org.schabi.newpipe.extractor.localization.Localization;
import org.schabi.newpipe.extractor.localization.TimeAgoParser;
import org.schabi.newpipe.extractor.services.youtube.YoutubeParsingHelper;
import org.schabi.newpipe.extractor.services.youtube.linkHandler.YoutubeChannelLinkHandlerFactory;
Expand All @@ -24,6 +23,7 @@
import org.schabi.newpipe.extractor.utils.Utils;

import java.io.IOException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
Expand Down Expand Up @@ -329,10 +329,13 @@ public InfoItemsPage<StreamInfoItem> getInitialPage() throws IOException, Extrac
.getArray("contents").getObject(0).getObject("itemSectionRenderer")
.getArray("contents").getObject(0).getObject("gridRenderer");

final List<String> channelInformations = new ArrayList<>();
channelInformations.add(getName());
channelInformations.add(getUrl());
final JsonObject continuation = collectStreamsFrom(collector, gridRenderer
.getArray("items"));
.getArray("items"), channelInformations);

nextPage = getNextPageFrom(continuation);
nextPage = getNextPageFrom(continuation, channelInformations);
}

return new InfoItemsPage<>(collector, nextPage);
Expand All @@ -345,10 +348,7 @@ public InfoItemsPage<StreamInfoItem> getPage(final Page page) throws IOException
throw new IllegalArgumentException("Page doesn't contain an URL");
}

// Unfortunately, we have to fetch the page even if we are only getting next streams,
// as they don't deliver enough information on their own (the channel name, for example).

if (!isPageFetched()) fetchPage();
final List<String> channelInformations = page.getIds();

final StreamInfoItemsCollector collector = new StreamInfoItemsCollector(getServiceId());
final Map<String, List<String>> headers = new HashMap<>();
Expand All @@ -364,12 +364,13 @@ public InfoItemsPage<StreamInfoItem> getPage(final Page page) throws IOException
.getObject("appendContinuationItemsAction");

final JsonObject continuation = collectStreamsFrom(collector, sectionListContinuation
.getArray("continuationItems"));
.getArray("continuationItems"), channelInformations);

return new InfoItemsPage<>(collector, getNextPageFrom(continuation));
return new InfoItemsPage<>(collector, getNextPageFrom(continuation, channelInformations));
}

private Page getNextPageFrom(final JsonObject continuations) throws IOException,
private Page getNextPageFrom(final JsonObject continuations,
final List<String> channelInformations) throws IOException,
ExtractionException {
if (isNullOrEmpty(continuations)) {
return null;
Expand All @@ -385,7 +386,8 @@ private Page getNextPageFrom(final JsonObject continuations) throws IOException,
.done())
.getBytes(UTF_8);

return new Page(YOUTUBEI_V1_URL + "browse?key=" + getKey(), body);
return new Page(YOUTUBEI_V1_URL + "browse?key=" + getKey(), null, channelInformations,
null, body);
}

/**
Expand All @@ -394,14 +396,14 @@ private Page getNextPageFrom(final JsonObject continuations) throws IOException,
* @param collector the collector where videos will be committed
* @param videos the array to get videos from
* @return the continuation object
* @throws ParsingException if an error happened while extracting
*/
private JsonObject collectStreamsFrom(final StreamInfoItemsCollector collector,
final JsonArray videos) throws ParsingException {
final JsonArray videos,
final List<String> channelInformations) {
collector.reset();

final String uploaderName = getName();
final String uploaderUrl = getUrl();
final String uploaderName = channelInformations.get(0);
final String uploaderUrl = channelInformations.get(1);
final TimeAgoParser timeAgoParser = getTimeAgoParser();

JsonObject continuation = null;
Expand Down

0 comments on commit e0011de

Please sign in to comment.