
Separated class for channel page

commit 49a3edd276 (parent ec1d3c97d0)
Author: kclauhk, 2024-07-19 02:45:36 +08:00 (committed by kclauhk)


@@ -53,8 +53,8 @@ def _extract_info(self, gif_data, video_id):
                 self._merge_subtitles(dash_subs, target=subtitles)
         if data := gif_data.get('images'):
             if data.get('looping'):
-                data['looping']['height'] = traverse_obj(data, ('original_mp4', 'height'), {int})
-                data['looping']['width'] = traverse_obj(data, ('original_mp4', 'width'), {int})
+                data['looping']['height'] = traverse_obj(data, ('original_mp4', 'height', {int_or_none}))
+                data['looping']['width'] = traverse_obj(data, ('original_mp4', 'width', {int_or_none}))
             sorted_data = dict(sorted(data.items(), reverse=True))
             formats.extend(self._extract_formats(sorted_data))
             thumbnails.extend(self._extract_formats(data, is_still=True))
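
For reference, the corrected traversal applies the coercion inside a single path, so the `{int_or_none}` step maps whatever `('original_mp4', 'height')` resolves to. A minimal standalone sketch (the sample dict below is made up, not real API output):

    from yt_dlp.utils import int_or_none, traverse_obj

    # hypothetical stand-in for gif_data['images']
    data = {'original_mp4': {'height': '480', 'width': 640}}

    # the trailing {int_or_none} set applies the function to the resolved value
    print(traverse_obj(data, ('original_mp4', 'height', {int_or_none})))  # 480
    print(traverse_obj(data, ('original_mp4', 'width', {int_or_none})))   # 640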
@@ -70,22 +70,22 @@ def _extract_info(self, gif_data, video_id):
         if data := gif_data.get('user'):
             if isinstance(data, dict):
                 uploader = traverse_obj(data, {
-                    'uploader': (('display_name', 'name', 'attribution_display_name', 'username'), {str_or_none},
-                                 {lambda x: x if x else gif_data.get('username')}),
-                    'uploader_id': ('username', {str_or_none}, {lambda x: x if x else gif_data.get('username')}),
-                    'uploader_url': (('profile_url', 'website_url'), {str_or_none},
-                                     {lambda x: f'https://giphy.com{x}' if x[0] == '/' else url_or_none(x)}),
+                    'uploader': (('display_name', 'name', 'attribution_display_name', 'username'),
+                                 {lambda x: x or gif_data.get('username')}),
+                    'uploader_id': ('username', {lambda x: x or gif_data.get('username')}),
+                    'uploader_url': (('profile_url', 'website_url'),
+                                     {lambda x: f'https://giphy.com{x}' if x and x[0] == '/' else url_or_none(x)}),
                 }, get_all=False)
         # basic info
         info = {
             **traverse_obj(gif_data, {
-                'id': ('id', {str}, {lambda x: x or video_id}),
-                'title': ('title', {str_or_none}, {lambda x: x.strip() if x else ''}),
-                'description': ((None, 'video'), ('alt_text', 'description'), {str_or_none},
+                'id': ('id', {lambda x: x or video_id}),
+                'title': ('title', {lambda x: x.strip() if x else ''}),
+                'description': ((None, 'video'), ('alt_text', 'description'),
                                 {lambda x: x.strip() if x and not x.startswith('Discover & share') else None}),
                 'tags': ('tags', {list}),
-                'age_limit': ('rating', {str_or_none}, {lambda x: 18 if x in ['r', 'nc-17'] else None}),
-                'upload_date': (('import_datetime', 'create_datetime'), {str_or_none},
+                'age_limit': ('rating', {lambda x: 18 if x in ['r', 'nc-17'] else None}),
+                'upload_date': (('import_datetime', 'create_datetime'),
                                 {lambda x: x[:10].replace('-', '') if x else None}),
             }, get_all=False),
         }
@@ -102,8 +102,8 @@ def _api_channel_feed(self, channel_id):
         query_url = f'https://giphy.com/api/v4/channels/{channel_id}/feed/?offset={offset}'
         for _ in itertools.count(1):
             search_results = self._download_json(query_url, channel_id, fatal=False,
-                                                 note=f'Fetching feed {offset + 1}-{offset + 25}')
-            if not search_results.get('results'):
+                                                 note=f'Downloading feed data {offset + 1}-{offset + 25}')
+            if not search_results or not search_results.get('results'):
                 return
             for video in search_results['results']:
                 yield {
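
For reference, a rough standalone sketch of the paging behaviour of `_api_channel_feed` (the 25-item page size is inferred from the progress note above, plain `urllib` stands in for the extractor's `_download_json`, and how the real code advances the feed URL is not shown in this hunk):

    import itertools
    import json
    from urllib.request import urlopen

    def channel_feed(channel_id):
        offset = 0
        for _ in itertools.count(1):
            url = f'https://giphy.com/api/v4/channels/{channel_id}/feed/?offset={offset}'
            with urlopen(url) as resp:  # stand-in for self._download_json(..., fatal=False)
                data = json.load(resp)
            results = (data or {}).get('results')
            if not results:
                return
            yield from results
            offset += 25  # assumed page size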
@@ -121,7 +121,7 @@ def _api_channel_feed(self, channel_id):


 class GiphyIE(GiphyBaseIE):
-    _VALID_URL = r'https?://giphy\.com/(?!(?:search)|(?:stories)/)(?:.+[/-])?(?P<id>[^/]+)/?$'
+    _VALID_URL = r'https?://giphy\.com/(?:clips|gifs|stickers|embed)/(?:.+[/-])?(?P<id>[^/]+)/?$'
     _TESTS = [{
         'url': 'https://giphy.com/gifs/l2JIcQ4UH5SoPtMJi',
         'info_dict': {
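
For reference, a quick standalone check of how the tightened pattern above and the new `GiphyChannelPageIE` pattern (added further down) split the URL space; the sample URLs are taken from the tests in this file:

    import re

    GIF_RE = r'https?://giphy\.com/(?:clips|gifs|stickers|embed)/(?:.+[/-])?(?P<id>[^/]+)/?$'
    CHANNEL_RE = r'https?://giphy\.com/(?!(?:clips|gifs|stickers|stories|search|embed)/)(?:.+/)?(?P<id>[^/]+)/?$'

    for url in ('https://giphy.com/gifs/l2JIcQ4UH5SoPtMJi',        # single gif      -> GiphyIE
                'https://giphy.com/stickers/mario-PFxFYEZNUavG8',  # sticker         -> GiphyIE
                'https://giphy.com/channel/catsmusical',           # channel         -> GiphyChannelPageIE
                'https://giphy.com/southpark/reactions/lol'):      # channel section -> GiphyChannelPageIE
        ie = 'GiphyIE' if re.match(GIF_RE, url) else 'GiphyChannelPageIE' if re.match(CHANNEL_RE, url) else None
        print(url, '->', ie)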
@@ -175,6 +175,16 @@ class GiphyIE(GiphyBaseIE):
             'uploader_id': 'southpark',
             'uploader_url': 'https://giphy.com/southpark',
         },
+    }, {
+        'url': 'https://giphy.com/stickers/mario-PFxFYEZNUavG8',
+        'info_dict': {
+            'id': 'PFxFYEZNUavG8',
+            'ext': 'mp4',
+            'title': 'nintendo mario STICKER',
+            'tags': ['transparent', 'gaming', 'nintendo', 'mario', 'giphynintendos'],
+            'thumbnail': r're:^https?://.*',
+            'upload_date': '20160908',
+        },
     }, {
         'url': 'https://giphy.com/embed/00xGP4zv8xENZ2tc3Y',
         'info_dict': {
@@ -189,87 +199,24 @@ class GiphyIE(GiphyBaseIE):
             'uploader_id': 'netflix',
             'uploader_url': 'https://giphy.com/netflix/',
         },
-    }, {
-        'url': 'https://giphy.com/stickers/mario-PFxFYEZNUavG8',
-        'info_dict': {
-            'id': 'PFxFYEZNUavG8',
-            'ext': 'mp4',
-            'title': 'nintendo mario STICKER',
-            'tags': ['transparent', 'gaming', 'nintendo', 'mario', 'giphynintendos'],
-            'thumbnail': r're:^https?://.*',
-            'upload_date': '20160908',
-        },
-    }, {
-        'url': 'https://giphy.com/catsmusical/',
-        'playlist_count': 10,
-        'info_dict': {
-            'id': '8707',
-            'title': 'Cats the Musical',
-            'uploader_id': 'catsmusical',
-            'uploader_url': 'https://giphy.com/channel/catsmusical',
-        },
-    }, {
-        'url': 'https://giphy.com/channel/catsmusical',
-        'playlist_count': 10,
-        'info_dict': {
-            'id': '8707',
-            'title': 'Cats the Musical',
-            'uploader_id': 'catsmusical',
-            'uploader_url': 'https://giphy.com/channel/catsmusical',
-        },
-    }, {
-        'url': 'https://giphy.com/southpark/reactions/lol',
-        'playlist_count': 42,
-        'info_dict': {
-            'id': '1044',
-            'title': 'LOL',
-            'uploader_id': 'southpark',
-            'uploader_url': 'https://giphy.com/channel/southpark',
-        },
-    }, {
-        'url': 'https://giphy.com/corgiyolk/cute-and-wholesome-corgi',
-        'playlist_count': 14,
-        'info_dict': {
-            'id': '34458076',
-            'title': 'Cute and Wholesome corgi',
-            'uploader_id': 'corgiyolk',
-            'uploader_url': 'https://giphy.com/channel/corgiyolk',
-        },
     }]

     def _real_extract(self, url):
         video_id = self._match_id(url)
         webpage = self._download_webpage(url.replace('/embed/', '/gifs/'), video_id)
-        # {"channelId": ...}
-        if channel_id := self._html_search_regex(r'\{"channelId":\s*([^\}]+)\}', webpage, 'channel_id', default=None):
-            uploader_id = self._html_search_meta('twitter:creator', webpage).replace('@', '').lower()
-            entries = []
-            for i in self._api_channel_feed(channel_id):
-                entries.append(i)
-            return {
-                'id': channel_id,
-                'title': (self._html_search_meta('twitter:title', webpage)
-                          or self._og_search_title(webpage)
-                          ).replace(' GIFs on GIPHY - Be Animated', '').strip(),
-                'uploader_id': uploader_id,
-                'uploader_url': f'https://giphy.com/channel/{uploader_id}' if uploader_id != 'giphy' else None,
-                '_type': 'playlist',
-                'entries': entries,
-            }
-        else:
-            title = (self._html_search_meta('twitter:title', webpage, default=None)
-                     or self._og_search_title(webpage).replace(' - Find & Share on GIPHY', '').strip())
-            description = (self._html_search_meta('twitter:description', webpage, default=None)
-                           or self._og_search_description(webpage))
-            description = description if not description.startswith('Discover & share') else None
-            # \"gif\":{\"type\":\"gif\",...},
-            if json_str := self._html_search_regex(r'\\"\w+\\":({\\"type\\":\\"(?!emoji).*?is_dynamic\\":\w+}),',
-                                                   webpage, 'video_data', default=None):
-                gif_data = self._parse_json(json_str.encode('utf-8').decode('unicode_escape'), video_id)
-            # gif: {"id":...},
-            elif json_str := self._html_search_regex(r'\s+\w+:\s*({".*?}),\n\s+', webpage, 'video_data', default='{}'):
-                gif_data = self._parse_json(json_str, video_id)
+        title = (self._html_search_meta('twitter:title', webpage, default=None)
+                 or self._og_search_title(webpage).replace(' - Find & Share on GIPHY', '').strip())
+        description = (self._html_search_meta('twitter:description', webpage, default=None)
+                       or self._og_search_description(webpage))
+        description = description if not description.startswith('Discover & share') else None
+        gif_data = {}
+        # search for: \"gif\":{\"type\":\"...},
+        if json_str := self._html_search_regex(r'\\"\w+\\":({\\"type\\":\\"(?!emoji).*?is_dynamic\\":\w+}),',
+                                               webpage, 'video_data', default=None):
+            gif_data = self._parse_json(json_str.encode('utf-8').decode('unicode_escape'), video_id)
+        # search for: gif: {"...},
+        elif json_str := self._html_search_regex(r'\s+\w+:\s*({".*?}),\n\s+', webpage, 'video_data', default='{}'):
+            gif_data = self._parse_json(json_str, video_id)
@@ -311,11 +258,11 @@ def _real_extract(self, url):
             data = self._parse_json(json_str.encode('utf-8').decode('unicode_escape'), video_id, fatal=False)
             if isinstance(data, dict):
                 uploader = traverse_obj(data, {
-                    'uploader': (('display_name', 'name', 'attribution_display_name', 'username'), {str_or_none},
-                                 {lambda x: x if x else gif_data.get('username')}),
+                    'uploader': (('display_name', 'name', 'attribution_display_name', 'username'),
+                                 {lambda x: x or gif_data.get('username')}),
                     'uploader_id': ('username', {str_or_none}),
-                    'uploader_url': (('profile_url', 'website_url'), {url_or_none},
-                                     {lambda x: f'https://giphy.com{x}' if x[0] == '/' else x}),
+                    'uploader_url': (('profile_url', 'website_url'),
+                                     {lambda x: f'https://giphy.com{x}' if x and x[0] == '/' else url_or_none(x)}),
                 }, get_all=False)
             if not uploader:
                 up_id = (gif_data.get('username')
@@ -341,6 +288,70 @@ def _real_extract(self, url):
         }


+class GiphyChannelPageIE(GiphyBaseIE):
+    _VALID_URL = r'https?://giphy\.com/(?!(?:clips|gifs|stickers|stories|search|embed)/)(?:.+/)?(?P<id>[^/]+)/?$'
+    _TESTS = [{
+        'url': 'https://giphy.com/catsmusical/',
+        'playlist_count': 10,
+        'info_dict': {
+            'id': '8707',
+            'title': 'Cats the Musical',
+            'uploader_id': 'catsmusical',
+            'uploader_url': 'https://giphy.com/channel/catsmusical',
+        },
+    }, {
+        'url': 'https://giphy.com/channel/catsmusical',
+        'playlist_count': 10,
+        'info_dict': {
+            'id': '8707',
+            'title': 'Cats the Musical',
+            'uploader_id': 'catsmusical',
+            'uploader_url': 'https://giphy.com/channel/catsmusical',
+        },
+    }, {
+        'url': 'https://giphy.com/southpark/reactions/lol',
+        'playlist_count': 42,
+        'info_dict': {
+            'id': '1044',
+            'title': 'LOL',
+            'uploader_id': 'southpark',
+            'uploader_url': 'https://giphy.com/channel/southpark',
+        },
+    }, {
+        'url': 'https://giphy.com/corgiyolk/cute-and-wholesome-corgi',
+        'playlist_count': 14,
+        'info_dict': {
+            'id': '34458076',
+            'title': 'Cute and Wholesome corgi',
+            'uploader_id': 'corgiyolk',
+            'uploader_url': 'https://giphy.com/channel/corgiyolk',
+        },
+    }]
+
+    def _real_extract(self, url):
+        video_id = self._match_id(url)
+        webpage = self._download_webpage(url, video_id)
+        # search for: {"channelId": ...} or {..., "channel_id": ...,
+        if channel_id := self._html_search_regex(r'\{[^\}\n]*"channel_?[iI]d":\s*"?([^",\}]+)[",\}]',
+                                                 webpage, 'channel_id', default=None):
+            uploader_id = self._html_search_meta('twitter:creator', webpage).replace('@', '').lower()
+            entries = []
+            for i in self._api_channel_feed(channel_id):
+                entries.append(i)
+            return {
+                'id': channel_id,
+                'title': (self._html_search_meta('twitter:title', webpage)
+                          or self._og_search_title(webpage)
+                          ).replace(' GIFs on GIPHY - Be Animated', '').strip(),
+                'uploader_id': uploader_id,
+                'uploader_url': f'https://giphy.com/channel/{uploader_id}' if uploader_id != 'giphy' else None,
+                '_type': 'playlist',
+                'entries': entries,
+            }
+
+
 class GiphyChannelIE(GiphyBaseIE, SearchInfoExtractor):
     IE_NAME = 'giphy:channel'
     IE_DESC = 'Giphy Channel'
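
For reference, a standalone check of the channel-id pattern used in `_real_extract` above; the JSON fragments are illustrative stand-ins for what the page may embed, covering both the camelCase and snake_case spellings the regex accepts:

    import re

    CHANNEL_ID_RE = r'\{[^\}\n]*"channel_?[iI]d":\s*"?([^",\}]+)[",\}]'

    for html in ('{"channelId": 8707}',                       # camelCase, unquoted value
                 '{"user": "x", "channel_id": "34458076"}'):  # snake_case, quoted value
        m = re.search(CHANNEL_ID_RE, html)
        print(m.group(1) if m else None)  # 8707, then 34458076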
@@ -356,8 +367,8 @@ class GiphyChannelIE(GiphyBaseIE, SearchInfoExtractor):

     def _search_results(self, query):
         if webpage := self._download_webpage(f'https://giphy.com/channel/{query}', query):
-            if channel_id := self._html_search_regex(r'\{["\']channelId["\']:\s*([^\}]+)\}',
-                                                     webpage, 'channelId', default=None):
+            if channel_id := self._html_search_regex(r'\{[^\}\n]*"channel_?[iI]d":\s*"?([^",\}]+)[",\}]',
+                                                     webpage, 'channel_id', default=None):
                 return self._api_channel_feed(channel_id)
@@ -373,11 +384,11 @@ class GiphySearchIE(GiphyBaseIE, SearchInfoExtractor):
             'title': 'super mario',
         },
     }, {
-        'url': 'giphysearch40:mickey&type=videos,stickers',
+        'url': 'giphysearch40:mickey&type=clips,stickers',
         'playlist_count': 40,
         'info_dict': {
-            'id': 'mickey&type=videos,stickers',
-            'title': 'mickey&type=videos,stickers',
+            'id': 'mickey&type=clips,stickers',
+            'title': 'mickey&type=clips,stickers',
         },
     }]
@@ -387,11 +398,11 @@ def search_query(query, offset, limit, category):
            # https://api.giphy.com/v1/gifs/search?rating=pg-13&offset=40&limit=15&type=gifs&q={query}&excludeDynamicResults=undefined&api_key=Gc7131jiJuvI7IdN0HZ1D7nh0ow5BU6g&pingback_id=1904d6e524cee33d
            return self._download_json(
                f'https://api.giphy.com/v1/{category}/search', query,
-                note=f'Fetching {category} result {offset + 1}-{offset + limit}', query={
+                note=f'Downloading {category} result {offset + 1}-{offset + limit}', query={
                    'rating': 'r',  # MPA film rating
                    'offset': offset,
                    'limit': limit,
-                    'type': category,  # known types: 'gifs', 'stickers', 'text', 'videos'
+                    'type': category,  # known types: 'clips', 'gifs', 'stickers', 'text', 'videos'
                    'q': query,
                    'excludeDynamicResults': 'undefined',
                    'api_key': self._GIPHY_FE_WEB_API_KEY,
@@ -399,7 +410,7 @@ def search_query(query, offset, limit, category):

        # type: comma delimited list
        types = self._search_regex(r'&type=([^&]+)', query, 'type', default='gifs,stickers,videos')
-        types = [(f'{x}s' if x[-1] != 's' and any(x in t for t in ['gifs', 'stickers', 'videos']) else x)
+        types = [(f'{x}s' if x[-1] != 's' and any(x in t for t in ['clips', 'gifs', 'stickers', 'videos']) else x)
                 for x in [x.strip() for x in types.lower().split(',')]]
        query = query.split('&type=')[0]
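
For reference, a small standalone check of the type normalisation above: singular spellings of the known categories get an 's' appended, anything else passes through unchanged (the input strings are made up):

    def normalize_types(raw):
        # mirrors the list comprehension in _search_results
        return [(f'{x}s' if x[-1] != 's' and any(x in t for t in ['clips', 'gifs', 'stickers', 'videos']) else x)
                for x in [x.strip() for x in raw.lower().split(',')]]

    print(normalize_types('clip, gifs, Sticker'))  # ['clips', 'gifs', 'stickers']
    print(normalize_types('videos,text'))          # ['videos', 'text']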
@@ -408,7 +419,8 @@ def search_query(query, offset, limit, category):
        for _ in itertools.count(1):
            for t in types:
                if t not in types_done:
-                    search_results = search_query(query, offset, limit, t)
+                    search_type = 'videos' if t == 'clips' else t  # clips use 'videos' type
+                    search_results = search_query(query, offset, limit, search_type)
                    if not search_results.get('data'):
                        self.to_screen(f'{query}: {offset} {t} found')
                        types_done.append(t)
@@ -491,17 +503,17 @@ def _real_extract(self, url):
                'webpage_url': video['gif']['url'],
            })
        info = traverse_obj(data, {
-            'id': ('story_id', {str_or_none}),
+            'id': ('story_id', {lambda x: x or slug}),
            'title': ('title', {str_or_none}),
            'description': ('description', {str_or_none}),
            'tags': ('tags', {list}),
            'thumbnails': ('cover_gif', 'gif', 'images', {dict}, {lambda x: self._extract_formats(x, is_still=True)}),
-            'upload_date': (('create_datetime', 'publish_datetime'), {str_or_none},
+            'upload_date': (('create_datetime', 'publish_datetime'),
                            {lambda x: x[:10].replace('-', '') if x else None}),
            'uploader': ('user', ('display_name', 'username'), {str_or_none}),
            'uploader_id': ('user', 'username', {str_or_none}),
-            'uploader_url': ('user', ('profile_url', 'website_url'), {str_or_none},
-                             {lambda x: f'https://giphy.com{x}' if x[0] == '/' else url_or_none(x)}),
+            'uploader_url': ('user', ('profile_url', 'website_url'),
+                             {lambda x: f'https://giphy.com{x}' if x and x[0] == '/' else url_or_none(x)}),
        }, get_all=False)

        return {