mirror of https://github.com/yt-dlp/yt-dlp.git synced 2025-03-09 12:50:23 -05:00
commit 37eceeb8eb
Author: Patrick Robertson
Date: 2025-03-07 23:03:32 +01:00 (committed by GitHub)

yt_dlp/extractor/facebook.py
@@ -70,7 +70,6 @@ class FacebookIE(InfoExtractor):
     IE_NAME = 'facebook'
     _VIDEO_PAGE_TEMPLATE = 'https://www.facebook.com/video/video.php?v=%s'
-    _VIDEO_PAGE_TAHOE_TEMPLATE = 'https://www.facebook.com/video/tahoe/async/%s/?chain=true&isvideo=true&payloadtype=primary'
     _TESTS = [{
         'url': 'https://www.facebook.com/radiokicksfm/videos/3676516585958356/',
@@ -238,7 +237,7 @@ class FacebookIE(InfoExtractor):
         'info_dict': {
             'id': '1569199726448814',
             'ext': 'mp4',
-            'title': 'Pence MUST GO!',
+            'title': 'Trump/Musk & Vance MUST GO!',
             'description': 'Vickie Gentry shared a memory.',
             'timestamp': 1511548260,
             'upload_date': '20171124',
@@ -413,6 +412,13 @@ class FacebookIE(InfoExtractor):
     }, {
         'url': 'https://www.facebook.com/groups/1513990329015294/posts/d41d8cd9/2013209885760000/?app=fbl',
         'only_matching': True,
+    }, {
+        'url': 'https://www.facebook.com/WatchESLOne/videos/297860117405429/',
+        'info_dict': {
+            'id': '297860117405429',
+        },
+        'playlist_count': 1,
+        'skip': 'URL that previously required tahoe player, but currently not working. More info: https://github.com/ytdl-org/youtube-dl/issues/15441',
     }]
     _SUPPORTED_PAGLETS_REGEX = r'(?:pagelet_group_mall|permalink_video_pagelet|hyperfeed_story_id_[0-9a-f]+)'
     _api_config = {
@@ -478,11 +484,7 @@ def _perform_login(self, username, password):
             self.report_warning(f'unable to log in: {err}')
             return
 
-    def _extract_from_url(self, url, video_id):
-        webpage = self._download_webpage(
-            url.replace('://m.facebook.com/', '://www.facebook.com/'), video_id)
-
-        def extract_metadata(webpage):
+    def _extract_metadata(self, webpage, video_id):
         post_data = [self._parse_json(j, video_id, fatal=False) for j in re.findall(
             r'data-sjs>({.*?ScheduledServerJS.*?})</script>', webpage)]
         post = traverse_obj(post_data, (
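
The core of the refactor starts here: helpers that were defined as closures inside `_extract_from_url` become private methods, with the variables they used to capture (`webpage`, `video_id`) passed explicitly. A minimal sketch of the pattern, using hypothetical stand-in names rather than the real extractor code:

    class Extractor:
        def _download(self, url):        # stand-in for self._download_webpage
            return '<title>demo</title>'

        def _find_title(self, webpage):  # stand-in for a real metadata regex
            return webpage[len('<title>'):-len('</title>')]

        # Before: the helper is a closure and silently captures video_id
        def extract_old(self, url, video_id):
            webpage = self._download(url)

            def extract_metadata(webpage):
                return {'id': video_id, 'title': self._find_title(webpage)}

            return extract_metadata(webpage)

        # After: the captured state becomes parameters, so the helper is a
        # normal method that any call site (or test) can reach
        def _extract_metadata(self, webpage, video_id):
            return {'id': video_id, 'title': self._find_title(webpage)}

        def extract_new(self, url, video_id):
            return self._extract_metadata(self._download(url), video_id)

    assert Extractor().extract_old('u', '1') == Extractor().extract_new('u', '1')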
@@ -543,9 +545,7 @@ def extract_metadata(webpage):
             or (description or '').replace('\n', ' ') or f'Facebook video #{video_id}')
         return merge_dicts(info_json_ld, info_dict)
 
-        video_data = None
-
-        def extract_video_data(instances):
+    def _extract_video_data(self, instances: list) -> list:
         video_data = []
         for item in instances:
             if try_get(item, lambda x: x[1][0]) == 'VideoConfig':
@@ -554,67 +554,7 @@ def extract_video_data(instances):
                     video_data.append(video_item['videoData'])
         return video_data
 
-        server_js_data = self._parse_json(self._search_regex(
-            [r'handleServerJS\(({.+})(?:\);|,")', r'\bs\.handle\(({.+?})\);'],
-            webpage, 'server js data', default='{}'), video_id, fatal=False)
-
-        if server_js_data:
-            video_data = extract_video_data(server_js_data.get('instances', []))
-
-        def extract_from_jsmods_instances(js_data):
-            if js_data:
-                return extract_video_data(try_get(
-                    js_data, lambda x: x['jsmods']['instances'], list) or [])
-
-        def extract_dash_manifest(vid_data, formats, mpd_url=None):
-            dash_manifest = traverse_obj(
-                vid_data, 'dash_manifest', 'playlist', 'dash_manifest_xml_string', 'manifest_xml', expected_type=str)
-            if dash_manifest:
-                formats.extend(self._parse_mpd_formats(
-                    compat_etree_fromstring(urllib.parse.unquote_plus(dash_manifest)),
-                    mpd_url=url_or_none(vid_data.get('dash_manifest_url')) or mpd_url))
-
-        def process_formats(info):
-            # Downloads with browser's User-Agent are rate limited. Working around
-            # with non-browser User-Agent.
-            for f in info['formats']:
-                f.setdefault('http_headers', {})['User-Agent'] = 'facebookexternalhit/1.1'
-                # Formats larger than ~500MB will return error 403 unless chunk size is regulated
-                f.setdefault('downloader_options', {})['http_chunk_size'] = 250 << 20
-
-        def yield_all_relay_data(_filter):
-            for relay_data in re.findall(rf'data-sjs>({{.*?{_filter}.*?}})</script>', webpage):
-                yield self._parse_json(relay_data, video_id, fatal=False) or {}
-
-        def extract_relay_data(_filter):
-            return next(filter(None, yield_all_relay_data(_filter)), {})
-
-        def extract_relay_prefetched_data(_filter, target_keys=None):
-            path = 'data'
-            if target_keys is not None:
-                path = lambda k, v: k == 'data' and any(target in v for target in variadic(target_keys))
-            return traverse_obj(yield_all_relay_data(_filter), (
-                ..., 'require', (None, (..., ..., ..., '__bbox', 'require')),
-                lambda _, v: any(key.startswith('RelayPrefetchedStreamCache') for key in v),
-                ..., ..., '__bbox', 'result', path, {dict}), get_all=False) or {}
-
-        if not video_data:
-            server_js_data = self._parse_json(self._search_regex([
-                r'bigPipe\.onPageletArrive\(({.+?})\)\s*;\s*}\s*\)\s*,\s*["\']onPageletArrive\s+' + self._SUPPORTED_PAGLETS_REGEX,
-                rf'bigPipe\.onPageletArrive\(({{.*?id\s*:\s*"{self._SUPPORTED_PAGLETS_REGEX}".*?}})\);',
-            ], webpage, 'js data', default='{}'), video_id, js_to_json, False)
-
-            video_data = extract_from_jsmods_instances(server_js_data)
-
-        if not video_data:
-            data = extract_relay_prefetched_data(
-                r'"(?:dash_manifest|playable_url(?:_quality_hd)?)',
-                target_keys=('video', 'event', 'nodes', 'node', 'mediaset'))
-            if data:
-                entries = []
-
-                def parse_graphql_video(video):
+    def _parse_graphql_video(self, video, video_id, webpage) -> dict:
         v_id = video.get('videoId') or video.get('id') or video_id
         reel_info = traverse_obj(
             video, ('creation_story', 'short_form_video_context', 'playback_video', {dict}))
@@ -643,14 +583,14 @@ def parse_graphql_video(video):
                     'quality': q(format_id) - 3,
                     'url': playable_url,
                 })
-                    extract_dash_manifest(fmt_data, formats)
+        self._extract_dash_manifest(fmt_data, formats)
 
         # New videoDeliveryResponse formats extraction
         fmt_data = traverse_obj(video, ('videoDeliveryResponseFragment', 'videoDeliveryResponseResult'))
         mpd_urls = traverse_obj(fmt_data, ('dash_manifest_urls', ..., 'manifest_url', {url_or_none}))
         dash_manifests = traverse_obj(fmt_data, ('dash_manifests', lambda _, v: v['manifest_xml']))
         for idx, dash_manifest in enumerate(dash_manifests):
-                        extract_dash_manifest(dash_manifest, formats, mpd_url=traverse_obj(mpd_urls, idx))
+            self._extract_dash_manifest(dash_manifest, formats, mpd_url=traverse_obj(mpd_urls, idx))
         if not dash_manifests:
             # Only extract from MPD URLs if the manifests are not already provided
             for mpd_url in mpd_urls:
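
Both call sites above feed `_extract_dash_manifest`, which accepts the DASH manifest either as inline, URL-escaped XML or as a separate manifest URL. A toy illustration of the unquote-then-parse step, using a stub manifest and the standard library in place of yt-dlp's `compat_etree_fromstring`:

    import urllib.parse
    import xml.etree.ElementTree as ET  # stand-in for compat_etree_fromstring

    # The extractor unquotes the manifest before parsing, so it evidently
    # arrives URL-escaped inside the JSON payload; this stub only mimics
    # the shape and is not real Facebook data
    escaped = '%3CMPD%20mediaPresentationDuration%3D%22PT10S%22%3E%3C%2FMPD%3E'
    mpd = ET.fromstring(urllib.parse.unquote_plus(escaped))
    print(mpd.tag, mpd.get('mediaPresentationDuration'))  # MPD PT10S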
@@ -704,7 +644,7 @@ def parse_graphql_video(video):
             'automatic_captions': automatic_captions,
             'subtitles': subtitles,
         }
-                    process_formats(info)
+        self._process_formats(info)
         description = try_get(video, lambda x: x['savable_description']['text'])
         title = video.get('name')
         if title:
@@ -714,12 +654,76 @@ def parse_graphql_video(video):
             })
         else:
             info['title'] = description or f'Facebook video #{v_id}'
-                    entries.append(info)
+        return info
+
+    def _extract_dash_manifest(self, vid_data, formats, mpd_url=None):
+        dash_manifest = traverse_obj(
+            vid_data, 'dash_manifest', 'playlist', 'dash_manifest_xml_string', 'manifest_xml', expected_type=str)
+        if dash_manifest:
+            formats.extend(self._parse_mpd_formats(
+                compat_etree_fromstring(urllib.parse.unquote_plus(dash_manifest)),
+                mpd_url=url_or_none(vid_data.get('dash_manifest_url')) or mpd_url))
+
+    def _process_formats(self, info: dict) -> None:
+        # Downloads with browser's User-Agent are rate limited. Working around
+        # with non-browser User-Agent.
+        for f in info['formats']:
+            f.setdefault('http_headers', {})['User-Agent'] = 'facebookexternalhit/1.1'
+            # Formats larger than ~500MB will return error 403 unless chunk size is regulated
+            f.setdefault('downloader_options', {})['http_chunk_size'] = 250 << 20
+
+    def _extract_from_jsmods_instances(self, js_data):
+        if js_data:
+            return self._extract_video_data(try_get(
+                js_data, lambda x: x['jsmods']['instances'], list) or [])
+
+    def _yield_all_relay_data(self, _filter, webpage):
+        for relay_data in re.findall(rf'data-sjs>({{.*?{_filter}.*?}})</script>', webpage):
+            yield self._parse_json(relay_data, None, fatal=False) or {}
+
+    def _extract_relay_prefetched_data(self, _filter, webpage, target_keys=None):
+        path = 'data'
+        if target_keys is not None:
+            path = lambda k, v: k == 'data' and any(target in v for target in variadic(target_keys))
+        return traverse_obj(self._yield_all_relay_data(_filter, webpage), (
+            ..., 'require', (None, (..., ..., ..., '__bbox', 'require')),
+            lambda _, v: any(key.startswith('RelayPrefetchedStreamCache') for key in v),
+            ..., ..., '__bbox', 'result', path, {dict}), get_all=False) or {}
+
+    def _extract_from_url(self, url, video_id):
+        webpage = self._download_webpage(
+            url.replace('://m.facebook.com/', '://www.facebook.com/'), video_id)
+
+        video_data = None
+        server_js_data = self._parse_json(self._search_regex(
+            [r'handleServerJS\(({.+})(?:\);|,")', r'\bs\.handle\(({.+?})\);'],
+            webpage, 'server js data', default='{}'), video_id, fatal=False)
+
+        if server_js_data:
+            video_data = self._extract_video_data(server_js_data.get('instances', []))
+
+        if not video_data:
+            server_js_data = self._parse_json(self._search_regex([
+                r'bigPipe\.onPageletArrive\(({.+?})\)\s*;\s*}\s*\)\s*,\s*["\']onPageletArrive\s+' + self._SUPPORTED_PAGLETS_REGEX,
+                rf'bigPipe\.onPageletArrive\(({{.*?id\s*:\s*"{self._SUPPORTED_PAGLETS_REGEX}".*?}})\);',
+            ], webpage, 'js data', default='{}'), video_id, js_to_json, False)
+            video_data = self._extract_from_jsmods_instances(server_js_data)
+
+        if not video_data:
+            data = self._extract_relay_prefetched_data(
+                r'"(?:dash_manifest|playable_url(?:_quality_hd)?)',
+                webpage,
+                target_keys=('video', 'event', 'nodes', 'node', 'mediaset'))
+            if data:
+                entries = []
 
                 def parse_attachment(attachment, key='media'):
                     media = attachment.get(key) or {}
                     if media.get('__typename') == 'Video':
-                        return parse_graphql_video(media)
+                        entries.append(self._parse_graphql_video(media, video_id, webpage))
 
                 nodes = variadic(traverse_obj(data, 'nodes', 'node') or [])
                 attachments = traverse_obj(nodes, (
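
The relay helpers above lean on yt-dlp's `traverse_obj` path syntax: `...` branches over every key or index, a tuple tries alternative paths, a callable receives `(key, value)` pairs and keeps the ones it returns truthy for (exceptions inside it count as non-matches), and a set like `{dict}` filters by type. A small self-contained example with invented data:

    from yt_dlp.utils import traverse_obj  # assuming yt-dlp is installed

    post = {
        'attachments': [
            {'media': {'__typename': 'Photo', 'id': '1'}},
            {'media': {'__typename': 'Video', 'id': '297860117405429'}},
        ],
    }

    # Branch over every attachment, keep only entries whose media is a
    # Video (the same test parse_attachment performs), then take the id
    video_ids = traverse_obj(post, (
        'attachments', lambda _, v: v['media']['__typename'] == 'Video',
        'media', 'id'))

    print(video_ids)  # ['297860117405429']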
@@ -747,13 +751,13 @@ def parse_attachment(attachment, key='media'):
                 for attachment in attachments:
                     parse_attachment(attachment)
                 if not entries:
-                    parse_graphql_video(video)
+                    entries.append(self._parse_graphql_video(video, video_id, webpage))
 
                 if len(entries) > 1:
                     return self.playlist_result(entries, video_id)
 
                 video_info = entries[0] if entries else {'id': video_id}
-                webpage_info = extract_metadata(webpage)
+                webpage_info = self._extract_metadata(webpage, video_id)
                 # honor precise duration in video info
                 if video_info.get('duration'):
                     webpage_info['duration'] = video_info['duration']
@@ -782,13 +786,14 @@ def parse_attachment(attachment, key='media'):
                 }),
             }
 
-            prefetched_data = extract_relay_prefetched_data(r'"login_data"\s*:\s*{')
+            prefetched_data = self._extract_relay_prefetched_data(r'"login_data"\s*:\s*{', webpage)
             if prefetched_data:
                 lsd = try_get(prefetched_data, lambda x: x['login_data']['lsd'], dict)
                 if lsd:
                     post_data[lsd['name']] = lsd['value']
 
-            relay_data = extract_relay_data(r'\[\s*"RelayAPIConfigDefaults"\s*,')
+            relay_data = next(filter(None, self._yield_all_relay_data(r'\[\s*"RelayAPIConfigDefaults"\s*,', webpage)), {})
             for define in (relay_data.get('define') or []):
                 if define[0] == 'RelayAPIConfigDefaults':
                     self._api_config = define[2]
@@ -810,33 +815,6 @@ def parse_attachment(attachment, key='media'):
             return self.playlist_result(entries, video_id)
 
-        if not video_data:
-            # Video info not in first request, do a secondary request using
-            # tahoe player specific URL
-            tahoe_data = self._download_webpage(
-                self._VIDEO_PAGE_TAHOE_TEMPLATE % video_id, video_id,
-                data=urlencode_postdata({
-                    '__a': 1,
-                    '__pc': self._search_regex(
-                        r'pkg_cohort["\']\s*:\s*["\'](.+?)["\']', webpage,
-                        'pkg cohort', default='PHASED:DEFAULT'),
-                    '__rev': self._search_regex(
-                        r'client_revision["\']\s*:\s*(\d+),', webpage,
-                        'client revision', default='3944515'),
-                    'fb_dtsg': self._search_regex(
-                        r'"DTSGInitialData"\s*,\s*\[\]\s*,\s*{\s*"token"\s*:\s*"([^"]+)"',
-                        webpage, 'dtsg token', default=''),
-                }),
-                headers={
-                    'Content-Type': 'application/x-www-form-urlencoded',
-                })
-
-            tahoe_js_data = self._parse_json(
-                self._search_regex(
-                    r'for\s+\(\s*;\s*;\s*\)\s*;(.+)', tahoe_data,
-                    'tahoe js data', default='{}'),
-                video_id, fatal=False)
-
-            video_data = extract_from_jsmods_instances(tahoe_js_data)
-
         if not video_data:
             raise ExtractorError('Cannot parse data')
@@ -874,7 +852,7 @@ def parse_attachment(attachment, key='media'):
                     'quality': preference,
                     'height': 720 if quality == 'hd' else None,
                 })
-            extract_dash_manifest(f[0], formats)
+            self._extract_dash_manifest(f[0], formats)
             subtitles_src = f[0].get('subtitles_src')
             if subtitles_src:
                 subtitles.setdefault('en', []).append({'url': subtitles_src})
@@ -884,8 +862,8 @@ def parse_attachment(attachment, key='media'):
             'formats': formats,
             'subtitles': subtitles,
         }
-        process_formats(info_dict)
-        info_dict.update(extract_metadata(webpage))
+        self._process_formats(info_dict)
+        info_dict.update(self._extract_metadata(webpage, video_id))
 
         return info_dict
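
A closing note on `_process_formats`, which this refactor keeps intact: every format gets a non-browser User-Agent (Facebook rate-limits browser UAs) and a capped HTTP chunk size, since formats larger than roughly 500 MB return HTTP 403 when fetched in one request. `250 << 20` is 250 * 2**20 = 262,144,000 bytes, i.e. 250 MiB. A sketch of the effect on a single format dict, with the shape assumed from yt-dlp conventions:

    fmt = {'url': 'https://example.invalid/video.mp4'}  # hypothetical format entry

    # Non-browser UA to sidestep rate limiting of browser User-Agents
    fmt.setdefault('http_headers', {})['User-Agent'] = 'facebookexternalhit/1.1'

    # 250 << 20 == 250 * 2**20 == 262_144_000 bytes (250 MiB per request chunk)
    fmt.setdefault('downloader_options', {})['http_chunk_size'] = 250 << 20

    assert fmt['downloader_options']['http_chunk_size'] == 262_144_000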