1
0
mirror of https://github.com/yt-dlp/yt-dlp synced 2025-12-16 06:05:41 +07:00

Compare commits

...

3 Commits

Author SHA1 Message Date
sepro
a86eeaadf2 [ie/ntv.ru] Swap metadata priority (#14939)
Fix 5dde0d0c9f

Authored by: seproDev
2025-11-08 11:41:17 +01:00
sepro
5dde0d0c9f [ie/ntv.ru] Rework extractor (#14934)
Closes #14929, Closes #14761
Authored by: seproDev, anlar

Co-authored-by: Anton Larionov <11796525+anlar@users.noreply.github.com>
2025-11-08 10:51:21 +01:00
coletdjnz
5767fb4ab1 [networking] Ensure underlying file object is closed when fully read (#14935)
Fixes https://github.com/yt-dlp/yt-dlp/issues/14891

Authored by: coletdjnz
2025-11-08 18:30:43 +13:00
6 changed files with 218 additions and 73 deletions

View File

@@ -3,6 +3,7 @@
# Allow direct execution
import os
import sys
from unittest.mock import MagicMock
import pytest
@@ -614,8 +615,11 @@ def test_source_address(self, handler):
@pytest.mark.skip_handler('CurlCFFI', 'not supported by curl-cffi')
def test_gzip_trailing_garbage(self, handler):
with handler() as rh:
data = validate_and_send(rh, Request(f'http://localhost:{self.http_port}/trailing_garbage')).read().decode()
res = validate_and_send(rh, Request(f'http://localhost:{self.http_port}/trailing_garbage'))
data = res.read().decode()
assert data == '<html><video src="/vid.mp4" /></html>'
# Should auto-close and mark the response adaptor as closed
assert res.closed
@pytest.mark.skip_handler('CurlCFFI', 'not applicable to curl-cffi')
@pytest.mark.skipif(not brotli, reason='brotli support is not installed')
@@ -627,6 +631,8 @@ def test_brotli(self, handler):
headers={'ytdl-encoding': 'br'}))
assert res.headers.get('Content-Encoding') == 'br'
assert res.read() == b'<html><video src="/vid.mp4" /></html>'
# Should auto-close and mark the response adaptor as closed
assert res.closed
def test_deflate(self, handler):
with handler() as rh:
@@ -636,6 +642,8 @@ def test_deflate(self, handler):
headers={'ytdl-encoding': 'deflate'}))
assert res.headers.get('Content-Encoding') == 'deflate'
assert res.read() == b'<html><video src="/vid.mp4" /></html>'
# Should auto-close and mark the response adaptor as closed
assert res.closed
def test_gzip(self, handler):
with handler() as rh:
@@ -645,6 +653,8 @@ def test_gzip(self, handler):
headers={'ytdl-encoding': 'gzip'}))
assert res.headers.get('Content-Encoding') == 'gzip'
assert res.read() == b'<html><video src="/vid.mp4" /></html>'
# Should auto-close and mark the response adaptor as closed
assert res.closed
def test_multiple_encodings(self, handler):
with handler() as rh:
@@ -655,6 +665,8 @@ def test_multiple_encodings(self, handler):
headers={'ytdl-encoding': pair}))
assert res.headers.get('Content-Encoding') == pair
assert res.read() == b'<html><video src="/vid.mp4" /></html>'
# Should auto-close and mark the response adaptor as closed
assert res.closed
@pytest.mark.skip_handler('CurlCFFI', 'not supported by curl-cffi')
def test_unsupported_encoding(self, handler):
@@ -665,6 +677,8 @@ def test_unsupported_encoding(self, handler):
headers={'ytdl-encoding': 'unsupported', 'Accept-Encoding': '*'}))
assert res.headers.get('Content-Encoding') == 'unsupported'
assert res.read() == b'raw'
# Should auto-close and mark the response adaptor as closed
assert res.closed
def test_read(self, handler):
with handler() as rh:
@@ -672,9 +686,13 @@ def test_read(self, handler):
rh, Request(f'http://127.0.0.1:{self.http_port}/headers'))
assert res.readable()
assert res.read(1) == b'H'
# Ensure we don't close the adaptor yet
assert not res.closed
assert res.read(3) == b'ost'
assert res.read().decode().endswith('\n\n')
assert res.read() == b''
# Should auto-close and mark the response adaptor as closed
assert res.closed
def test_request_disable_proxy(self, handler):
for proxy_proto in handler._SUPPORTED_PROXY_SCHEMES or ['http']:
@@ -875,11 +893,31 @@ def test_file_urls(self, handler):
with handler(enable_file_urls=True) as rh:
res = validate_and_send(rh, req)
assert res.read() == b'foobar'
res.close()
assert res.read(1) == b'f'
assert not res.fp.closed
assert res.read() == b'oobar'
# Should automatically close the underlying file object
assert res.fp.closed
os.unlink(tf.name)
def test_data_uri_auto_close(self, handler):
with handler() as rh:
res = validate_and_send(rh, Request('data:text/plain,hello%20world'))
assert res.read() == b'hello world'
# Should automatically close the underlying file object
assert res.fp.closed
assert res.closed
def test_http_response_auto_close(self, handler):
with handler() as rh:
res = validate_and_send(rh, Request(f'http://127.0.0.1:{self.http_port}/gen_200'))
assert res.read() == b'<html></html>'
# Should automatically close the underlying file object in the HTTP Response
assert isinstance(res.fp, http.client.HTTPResponse)
assert res.fp.fp is None
assert res.closed
def test_http_error_returns_content(self, handler):
# urllib HTTPError will try to close the underlying response if the reference to the HTTPError object is lost
def get_response():
@@ -1012,6 +1050,14 @@ def mock_close(*args, **kwargs):
rh.close()
assert called
def test_http_response_auto_close(self, handler):
with handler() as rh:
res = validate_and_send(rh, Request(f'http://127.0.0.1:{self.http_port}/gen_200'))
assert res.read() == b'<html></html>'
# Should automatically close the underlying file object in the HTTP Response
assert res.fp.closed
assert res.closed
@pytest.mark.parametrize('handler', ['CurlCFFI'], indirect=True)
class TestCurlCFFIRequestHandler(TestRequestHandlerBase):
@@ -1177,6 +1223,14 @@ def close(self):
assert res4.closed
assert res4._buffer == b''
def test_http_response_auto_close(self, handler):
with handler() as rh:
res = validate_and_send(rh, Request(f'http://127.0.0.1:{self.http_port}/gen_200'))
assert res.read() == b'<html></html>'
# Should automatically close the underlying file object in the HTTP Response
assert res.fp.closed
assert res.closed
def run_validation(handler, error, req, **handler_kwargs):
with handler(**handler_kwargs) as rh:
@@ -2032,6 +2086,30 @@ def test_compat(self):
assert res.info() is res.headers
assert res.getheader('test') == res.get_header('test')
def test_auto_close(self):
# Should mark the response as closed if the underlying file is closed
class AutoCloseBytesIO(io.BytesIO):
def read(self, size=-1, /):
data = super().read(size)
self.close()
return data
fp = AutoCloseBytesIO(b'test')
res = Response(fp, url='test://', headers={}, status=200)
assert not res.closed
res.read()
assert res.closed
def test_close(self):
# Should not call close() on the underlying file when already closed
fp = MagicMock()
fp.closed = False
res = Response(fp, url='test://', headers={}, status=200)
res.close()
fp.closed = True
res.close()
assert fp.close.call_count == 1
class TestImpersonateTarget:
@pytest.mark.parametrize('target_str,expected', [

View File

@@ -1,17 +1,40 @@
from .common import InfoExtractor
from ..utils import (
int_or_none,
strip_or_none,
parse_iso8601,
unescapeHTML,
url_or_none,
xpath_text,
)
from ..utils.traversal import traverse_obj
class NTVRuIE(InfoExtractor):
IE_NAME = 'ntv.ru'
_VALID_URL = r'https?://(?:www\.)?ntv\.ru/(?:[^/]+/)*(?P<id>[^/?#&]+)'
_VALID_URL = r'https?://(?:www\.)?ntv\.ru/(?:[^/#?]+/)*(?P<id>[^/?#&]+)'
_TESTS = [{
# JSON Api is geo restricted
'url': 'https://www.ntv.ru/peredacha/svoya_igra/m58980/o818800',
'md5': '818962a1b52747d446db7cd5be43e142',
'info_dict': {
'id': '2520563',
'ext': 'mp4',
'title': 'Участники: Ирина Петрова, Сергей Коновалов, Кристина Кораблина',
'description': 'md5:fcbd21cd45238a940b95550f9e178e3e',
'thumbnail': r're:^http://.*\.jpg',
'duration': 2462,
'view_count': int,
'comment_count': int,
'tags': ['игры и игрушки'],
'timestamp': 1761821096,
'upload_date': '20251030',
'release_timestamp': 1761821096,
'release_date': '20251030',
'modified_timestamp': 1761821096,
'modified_date': '20251030',
},
}, {
'url': 'http://www.ntv.ru/novosti/863142/',
'md5': 'ba7ea172a91cb83eb734cad18c10e723',
'info_dict': {
@@ -22,31 +45,35 @@ class NTVRuIE(InfoExtractor):
'thumbnail': r're:^http://.*\.jpg',
'duration': 136,
'view_count': int,
'comment_count': int,
'tags': ['ВМС', 'захват', 'митинги', 'Севастополь', 'Украина'],
'timestamp': 1395222013,
'upload_date': '20140319',
'release_timestamp': 1395222013,
'release_date': '20140319',
'modified_timestamp': 1395222013,
'modified_date': '20140319',
},
}, {
'url': 'http://www.ntv.ru/video/novosti/750370/',
'md5': 'adecff79691b4d71e25220a191477124',
'info_dict': {
'id': '750370',
'ext': 'mp4',
'title': 'Родные пассажиров пропавшего Boeing не верят в трагический исход',
'description': 'Родные пассажиров пропавшего Boeing не верят в трагический исход',
'thumbnail': r're:^http://.*\.jpg',
'duration': 172,
'view_count': int,
},
'skip': '404 Not Found',
}, {
# Requires unescapeHTML
'url': 'http://www.ntv.ru/peredacha/segodnya/m23700/o232416',
'md5': '82dbd49b38e3af1d00df16acbeab260c',
'info_dict': {
'id': '747480',
'ext': 'mp4',
'title': '«Сегодня». 21 марта 2014 года. 16:00',
'description': '«Сегодня». 21 марта 2014 года. 16:00',
'title': '"Сегодня". 21 марта 2014 года. 16:00 ',
'description': 'md5:bed80745ca72af557433195f51a02785',
'thumbnail': r're:^http://.*\.jpg',
'duration': 1496,
'view_count': int,
'comment_count': int,
'tags': ['Брюссель', 'гражданство', 'ЕС', 'Крым', 'ОСАГО', 'саммит', 'санкции', 'события', 'чиновники', 'рейтинг'],
'timestamp': 1395406951,
'upload_date': '20140321',
'release_timestamp': 1395406951,
'release_date': '20140321',
'modified_timestamp': 1395406951,
'modified_date': '20140321',
},
}, {
'url': 'https://www.ntv.ru/kino/Koma_film/m70281/o336036/video/',
@@ -54,11 +81,19 @@ class NTVRuIE(InfoExtractor):
'info_dict': {
'id': '1126480',
'ext': 'mp4',
'title': 'Остросюжетный фильм «Кома»',
'description': 'Остросюжетный фильм «Кома»',
'title': 'Остросюжетный фильм "Кома"',
'description': 'md5:e79ffd0887425a0f05a58885c408d7d8',
'thumbnail': r're:^http://.*\.jpg',
'duration': 5592,
'duration': 5608,
'view_count': int,
'comment_count': int,
'tags': ['кино'],
'timestamp': 1432868572,
'upload_date': '20150529',
'release_timestamp': 1432868572,
'release_date': '20150529',
'modified_timestamp': 1432868572,
'modified_date': '20150529',
},
}, {
'url': 'http://www.ntv.ru/serial/Delo_vrachey/m31760/o233916/',
@@ -66,11 +101,19 @@ class NTVRuIE(InfoExtractor):
'info_dict': {
'id': '751482',
'ext': 'mp4',
'title': '«Дело врачей»: «Деревце жизни»',
'description': '«Дело врачей»: «Деревце жизни»',
'title': '"Дело врачей": "Деревце жизни"',
'description': 'md5:d6fbf9193f880f50d9cbfbcc954161c1',
'thumbnail': r're:^http://.*\.jpg',
'duration': 2590,
'view_count': int,
'comment_count': int,
'tags': ['врачи', 'больницы'],
'timestamp': 1395882300,
'upload_date': '20140327',
'release_timestamp': 1395882300,
'release_date': '20140327',
'modified_timestamp': 1395882300,
'modified_date': '20140327',
},
}, {
# Schemeless file URL
@@ -78,48 +121,26 @@ class NTVRuIE(InfoExtractor):
'only_matching': True,
}]
_VIDEO_ID_REGEXES = [
r'<meta property="og:url" content="https?://www\.ntv\.ru/video/(\d+)',
r'<meta property="og:video:(?:url|iframe)" content="https?://www\.ntv\.ru/embed/(\d+)',
r'<video embed=[^>]+><id>(\d+)</id>',
r'<video restriction[^>]+><key>(\d+)</key>',
]
def _real_extract(self, url):
video_id = self._match_id(url)
display_id = self._match_id(url)
webpage = self._download_webpage(url, display_id)
webpage = self._download_webpage(url, video_id)
video_url = self._og_search_property(
('video', 'video:iframe'), webpage, default=None)
if video_url:
video_id = self._search_regex(
r'https?://(?:www\.)?ntv\.ru/video/(?:embed/)?(\d+)',
video_url, 'video id', default=None)
if not video_id:
video_id = self._html_search_regex(
self._VIDEO_ID_REGEXES, webpage, 'video id')
video_id = self._html_search_regex(
r'<meta property="ya:ovs:feed_url" content="https?://www\.ntv\.ru/(?:exp/)?video/(\d+)', webpage, 'video id')
player = self._download_xml(
f'http://www.ntv.ru/vi{video_id}/',
video_id, 'Downloading video XML')
title = strip_or_none(unescapeHTML(xpath_text(player, './data/title', 'title', fatal=True)))
video = player.find('./data/video')
formats = []
for format_id in ['', 'hi', 'webm']:
file_ = xpath_text(video, f'./{format_id}file')
if not file_:
video_url = url_or_none(xpath_text(video, f'./{format_id}file'))
if not video_url:
continue
if file_.startswith('//'):
file_ = self._proto_relative_url(file_)
elif not file_.startswith('http'):
file_ = 'http://media.ntv.ru/vod/' + file_
formats.append({
'url': file_,
'url': video_url,
'filesize': int_or_none(xpath_text(video, f'./{format_id}size')),
})
hls_manifest = xpath_text(video, './playback/hls')
@@ -131,12 +152,28 @@ def _real_extract(self, url):
formats.extend(self._extract_mpd_formats(
dash_manifest, video_id, mpd_id='dash', fatal=False))
metadata = self._download_xml(
f'https://www.ntv.ru/exp/video/{video_id}', video_id, 'Downloading XML metadata', fatal=False)
return {
'id': xpath_text(video, './id'),
'title': title,
'description': strip_or_none(unescapeHTML(xpath_text(player, './data/description'))),
'thumbnail': xpath_text(video, './splash'),
'duration': int_or_none(xpath_text(video, './totaltime')),
'view_count': int_or_none(xpath_text(video, './views')),
'id': video_id,
'formats': formats,
**traverse_obj(player, {
'title': ('data/title/text()', ..., {str}, {unescapeHTML}, any),
'description': ('data/description/text()', ..., {str}, {unescapeHTML}, any),
'duration': ('data/video/totaltime/text()', ..., {int_or_none}, any),
'view_count': ('data/video/views/text()', ..., {int_or_none}, any),
'thumbnail': ('data/video/splash/text()', ..., {url_or_none}, any),
}),
**traverse_obj(metadata, {
'title': ('{*}title/text()', ..., {str}, {unescapeHTML}, any),
'description': ('{*}description/text()', ..., {str}, {unescapeHTML}, any),
'duration': ('{*}duration/text()', ..., {int_or_none}, any),
'timestamp': ('{*}create_date/text()', ..., {parse_iso8601}, any),
'release_timestamp': ('{*}upload_date/text()', ..., {parse_iso8601}, any),
'modified_timestamp': ('{*}modify_date/text()', ..., {parse_iso8601}, any),
'tags': ('{*}tag/text()', ..., {str}, {lambda x: x.split(',')}, ..., {str.strip}, filter),
'view_count': ('{*}stats/views_total/text()', ..., {int_or_none}, any),
'comment_count': ('{*}stats/comments/text()', ..., {int_or_none}, any),
}),
}

View File

@@ -96,7 +96,10 @@ def __init__(self, response: curl_cffi.requests.Response):
def read(self, amt=None):
try:
return self.fp.read(amt)
res = self.fp.read(amt)
if self.fp.closed:
self.close()
return res
except curl_cffi.requests.errors.RequestsError as e:
if e.code == CurlECode.PARTIAL_FILE:
content_length = e.response and int_or_none(e.response.headers.get('Content-Length'))

View File

@@ -119,17 +119,22 @@ def __init__(self, res: requests.models.Response):
self._requests_response = res
def _real_read(self, amt: int | None = None) -> bytes:
# Work around issue with `.read(amt)` then `.read()`
# See: https://github.com/urllib3/urllib3/issues/3636
if amt is None:
# compat: py3.9: Python 3.9 preallocates the whole read buffer, read in chunks
read_chunk = functools.partial(self.fp.read, 1 << 20, decode_content=True)
return b''.join(iter(read_chunk, b''))
# Interact with urllib3 response directly.
return self.fp.read(amt, decode_content=True)
def read(self, amt: int | None = None):
try:
# Work around issue with `.read(amt)` then `.read()`
# See: https://github.com/urllib3/urllib3/issues/3636
if amt is None:
# compat: py3.9: Python 3.9 preallocates the whole read buffer, read in chunks
read_chunk = functools.partial(self.fp.read, 1 << 20, decode_content=True)
return b''.join(iter(read_chunk, b''))
# Interact with urllib3 response directly.
return self.fp.read(amt, decode_content=True)
data = self._real_read(amt)
if self.fp.closed:
self.close()
return data
# See urllib3.response.HTTPResponse.read() for exceptions raised on read
except urllib3.exceptions.SSLError as e:
raise SSLError(cause=e) from e

View File

@@ -306,7 +306,25 @@ def __init__(self, res: http.client.HTTPResponse | urllib.response.addinfourl):
def read(self, amt=None):
try:
return self.fp.read(amt)
data = self.fp.read(amt)
underlying = getattr(self.fp, 'fp', None)
if isinstance(self.fp, http.client.HTTPResponse) and underlying is None:
# http.client.HTTPResponse automatically closes itself when fully read
self.close()
elif isinstance(self.fp, urllib.response.addinfourl) and underlying is not None:
# urllib's addinfourl does not close the underlying fp automatically when fully read
if isinstance(underlying, io.BytesIO):
# data URLs or in-memory responses (e.g. gzip/deflate/brotli decoded)
if underlying.tell() >= len(underlying.getbuffer()):
self.close()
elif isinstance(underlying, io.BufferedReader) and amt is None:
# file URLs.
# XXX: this will not mark the response as closed if it was fully read with amt.
self.close()
elif underlying is not None and underlying.closed:
# Catch-all for any cases where underlying file is closed
self.close()
return data
except Exception as e:
handle_response_read_exceptions(e)
raise e

View File

@@ -554,12 +554,16 @@ def read(self, amt: int | None = None) -> bytes:
# Expected errors raised here should be of type RequestError or subclasses.
# Subclasses should redefine this method with more precise error handling.
try:
return self.fp.read(amt)
res = self.fp.read(amt)
if self.fp.closed:
self.close()
return res
except Exception as e:
raise TransportError(cause=e) from e
def close(self):
self.fp.close()
if not self.fp.closed:
self.fp.close()
return super().close()
def get_header(self, name, default=None):