Mirror of https://github.com/yt-dlp/yt-dlp, synced 2025-12-16 14:15:41 +07:00

Compare commits: 2021.11.10 ... 2021.12.25 (208 commits)
Commit SHA1s in this range:

```
459aea84c3 87e0499624 0f86a1cd59 d80d98e7d4 352d5da812 d43de6821c 070f6a85ea 4b4b7f746c
e9efb99f66 a709d87335 774a46c53d c8b80b9643 4e260d1a56 4f3fa23e5a b28bac93ab 37893bb0c9
c25de59cf7 205a0654c0 663949f825 b69fd25c25 e0fd95737d 4ac5b94807 4273cc776d fa9f30b802
1cefca9e44 5edb8dfec2 0fcba15d57 adbc4ec4bb c031b0414c f3aa3c3f98 ae43a4b986 ca5db158ae
5f549d4959 6839d02cb6 2aae2c91ff c2dedf12e8 e75bb0d6c3 dd0228ce1f 37e57a9fd4 940a67a3e2
e6ae51c123 75ad33572b aab41cdd33 b3a5115ff1 d76d15a669 e978789f0f ec2e44fc57 375d9360bf
d5c3254889 fed1309651 fe69f52e5c 3116be32b4 a8549f19e7 39ca3b5c7f 46383212b3 0bb322b9c0
ff9f925b63 5bfc8bee5a 19188702ef d984a98def 069c6ccf02 53dad39e30 db77c49c84 abc07b554c
86f3d52f8c 8b688881ba 13debc86e7 b5f94e4fa1 61882afdc5 aa4b054512 487c5b3389 8157a09d22
b1aaf1c07f 5f9aaac8c2 54c2521ca6 2814f12ba4 1619836cb7 e3c7d49571 ddd24c9949 443b21dc4e
66f4c04e50 93864403ea b5475f1145 38d79fd16c acc0d6a411 146cc4114a 818faa3a86 aa5ecf082c
d2b2fca53f 63ccf4ff1a 43b2290658 99148c6a33 9bdd99cf39 2c4aaaddc9 5f7cb91ae9 3efb96a6d1
3262f8abf2 bdbafb3913 a804f6d89c 814dfb7e25 91f071af60 2aa5e2cc01 1bad50eced ac0efabf12
73f035e1fe 0cbed930c8 5118d2ec58 717216b093 5c22c63da3 ee8dd27a73 f304da8a29 06dfe0a0a2
75b725a7cc 13ab5fa586 36eaf3039a f2ebc5c7be b222c27145 5e5be0c0b2 7578d77d8c b29165267f
bc104778d6 d298d33fe6 bf57cfa8b7 3c2208f82d 93e597ba28 b28cdcc0e4 a33c0d9c5d 75689fe59b
5ce1d13eba e04b003e64 909b0d66f4 dfd78699f5 639f80c1f9 896a88c5c6 4e4ba1d75f 2abf081554
359df0fc42 3938a9212c cf1f13b817 18d6dd4e01 883ecd5494 eb56d132d2 17b4540662 da27aeea5c
fec41d17a5 a61fd4cf6f a6213a4925 9941a1e127 ff51ed588f 57dbe8077f e5d731f35d d52cd2f5cd
bc8ab44ea0 8f122fa070 14a086058a 0e6b018a10 f7b558df4d 1ee34c76bb 234416e4bf c98d4df23b
849d699a8b 77fcc65158 545ad64988 d76991ab07 282f570918 c07a39ae8e c5e3f84972 c45b87419f
7333296ff5 a04e005521 6b993ca765 dd2a987d3f 9222c38182 467b6b8387 8863c8f09e e16fefd869
c6118ca2cc 764f5de2f4 cfcaf64a4b 402cd603a4 22a510ff44 61be785a67 11852843e7 525d9e0c7d
9d63137eac 266a1b5d52 450bdf69bc 720c309932 d8cf8d97a8 d0d012d4e7 013b50b794 dac5df5a98
f279aaee8e d0e6121adf 9ac24e235e 7c7f7161fc e339d25a0d 39c04074e7 92775d8a40 df03de2c02
48e9310660 c1dc0ee56e bf5f605e76 e08a85d865 093a17107e 44bcb8d122 013ae2e503 b47d236d72
```
**.github/ISSUE_TEMPLATE/1_broken_site.yml** (vendored, 8 changes)

```diff
@@ -1,6 +1,6 @@
 name: Broken site support
 description: Report broken or misfunctioning site
-labels: [triage, extractor-bug]
+labels: [triage, site-bug]
 body:
   - type: checkboxes
     id: checklist
@@ -11,7 +11,7 @@ body:
       options:
         - label: I'm reporting a broken site
           required: true
-        - label: I've verified that I'm running yt-dlp version **2021.11.10.1**. ([update instructions](https://github.com/yt-dlp/yt-dlp#update))
+        - label: I've verified that I'm running yt-dlp version **2021.12.25**. ([update instructions](https://github.com/yt-dlp/yt-dlp#update))
           required: true
         - label: I've checked that all provided URLs are alive and playable in a browser
           required: true
@@ -51,12 +51,12 @@ body:
         [debug] Portable config file: yt-dlp.conf
         [debug] Portable config: ['-i']
         [debug] Encodings: locale cp1252, fs utf-8, stdout utf-8, stderr utf-8, pref cp1252
-        [debug] yt-dlp version 2021.11.10.1 (exe)
+        [debug] yt-dlp version 2021.12.25 (exe)
         [debug] Python version 3.8.8 (CPython 64bit) - Windows-10-10.0.19041-SP0
         [debug] exe versions: ffmpeg 3.0.1, ffprobe 3.0.1
         [debug] Optional libraries: Cryptodome, keyring, mutagen, sqlite, websockets
         [debug] Proxy map: {}
-        yt-dlp is up to date (2021.11.10.1)
+        yt-dlp is up to date (2021.12.25)
         <more lines>
       render: shell
     validations:
```
**.github/ISSUE_TEMPLATE/2_site_support_request.yml** (vendored)

```diff
@@ -11,7 +11,7 @@ body:
       options:
         - label: I'm reporting a new site support request
           required: true
-        - label: I've verified that I'm running yt-dlp version **2021.11.10.1**. ([update instructions](https://github.com/yt-dlp/yt-dlp#update))
+        - label: I've verified that I'm running yt-dlp version **2021.12.25**. ([update instructions](https://github.com/yt-dlp/yt-dlp#update))
           required: true
         - label: I've checked that all provided URLs are alive and playable in a browser
           required: true
@@ -34,7 +34,7 @@ body:
       label: Example URLs
       description: |
         Provide all kinds of example URLs for which support should be added
-      value: |
+      placeholder: |
         - Single video: https://www.youtube.com/watch?v=BaW_jenozKc
         - Single video: https://youtu.be/BaW_jenozKc
         - Playlist: https://www.youtube.com/playlist?list=PL4lCao7KL_QFVb7Iudeipvc2BCavECqzc
@@ -62,12 +62,12 @@ body:
         [debug] Portable config file: yt-dlp.conf
         [debug] Portable config: ['-i']
         [debug] Encodings: locale cp1252, fs utf-8, stdout utf-8, stderr utf-8, pref cp1252
-        [debug] yt-dlp version 2021.11.10.1 (exe)
+        [debug] yt-dlp version 2021.12.25 (exe)
         [debug] Python version 3.8.8 (CPython 64bit) - Windows-10-10.0.19041-SP0
         [debug] exe versions: ffmpeg 3.0.1, ffprobe 3.0.1
         [debug] Optional libraries: Cryptodome, keyring, mutagen, sqlite, websockets
         [debug] Proxy map: {}
-        yt-dlp is up to date (2021.11.10.1)
+        yt-dlp is up to date (2021.12.25)
         <more lines>
       render: shell
     validations:
```
**.github/ISSUE_TEMPLATE/3_site_feature_request.yml** (vendored)

```diff
@@ -1,5 +1,5 @@
 name: Site feature request
-description: Request a new functionality for a site
+description: Request a new functionality for a supported site
 labels: [triage, site-enhancement]
 body:
   - type: checkboxes
@@ -11,7 +11,7 @@ body:
       options:
         - label: I'm reporting a site feature request
           required: true
-        - label: I've verified that I'm running yt-dlp version **2021.11.10.1**. ([update instructions](https://github.com/yt-dlp/yt-dlp#update))
+        - label: I've verified that I'm running yt-dlp version **2021.12.25**. ([update instructions](https://github.com/yt-dlp/yt-dlp#update))
           required: true
         - label: I've checked that all provided URLs are alive and playable in a browser
           required: true
@@ -47,3 +47,26 @@ body:
       placeholder: WRITE DESCRIPTION HERE
     validations:
       required: true
+  - type: textarea
+    id: log
+    attributes:
+      label: Verbose log
+      description: |
+        Provide the complete verbose output of yt-dlp that demonstrates the need for the enhancement.
+        Add the `-Uv` flag to your command line you run yt-dlp with (`yt-dlp -Uv <your command line>`), copy the WHOLE output and insert it below.
+        It should look similar to this:
+      placeholder: |
+        [debug] Command-line config: ['-Uv', 'http://www.youtube.com/watch?v=BaW_jenozKc']
+        [debug] Portable config file: yt-dlp.conf
+        [debug] Portable config: ['-i']
+        [debug] Encodings: locale cp1252, fs utf-8, stdout utf-8, stderr utf-8, pref cp1252
+        [debug] yt-dlp version 2021.12.25 (exe)
+        [debug] Python version 3.8.8 (CPython 64bit) - Windows-10-10.0.19041-SP0
+        [debug] exe versions: ffmpeg 3.0.1, ffprobe 3.0.1
+        [debug] Optional libraries: Cryptodome, keyring, mutagen, sqlite, websockets
+        [debug] Proxy map: {}
+        yt-dlp is up to date (2021.12.25)
+        <more lines>
+      render: shell
+    validations:
+      required: true
```
**.github/ISSUE_TEMPLATE/4_bug_report.yml** (vendored, 8 changes)

```diff
@@ -1,6 +1,6 @@
 name: Bug report
 description: Report a bug unrelated to any particular site or extractor
-labels: [triage,bug]
+labels: [triage, bug]
 body:
   - type: checkboxes
     id: checklist
@@ -11,7 +11,7 @@ body:
       options:
         - label: I'm reporting a bug unrelated to a specific site
           required: true
-        - label: I've verified that I'm running yt-dlp version **2021.11.10.1**. ([update instructions](https://github.com/yt-dlp/yt-dlp#update))
+        - label: I've verified that I'm running yt-dlp version **2021.12.25**. ([update instructions](https://github.com/yt-dlp/yt-dlp#update))
           required: true
         - label: I've checked that all provided URLs are alive and playable in a browser
           required: true
@@ -45,12 +45,12 @@ body:
         [debug] Portable config file: yt-dlp.conf
         [debug] Portable config: ['-i']
         [debug] Encodings: locale cp1252, fs utf-8, stdout utf-8, stderr utf-8, pref cp1252
-        [debug] yt-dlp version 2021.11.10.1 (exe)
+        [debug] yt-dlp version 2021.12.25 (exe)
         [debug] Python version 3.8.8 (CPython 64bit) - Windows-10-10.0.19041-SP0
         [debug] exe versions: ffmpeg 3.0.1, ffprobe 3.0.1
         [debug] Optional libraries: Cryptodome, keyring, mutagen, sqlite, websockets
         [debug] Proxy map: {}
-        yt-dlp is up to date (2021.11.10.1)
+        yt-dlp is up to date (2021.12.25)
         <more lines>
       render: shell
     validations:
```
**.github/ISSUE_TEMPLATE/5_feature_request.yml** (vendored, 4 changes)

```diff
@@ -1,4 +1,4 @@
-name: Feature request request
+name: Feature request
 description: Request a new functionality unrelated to any particular site or extractor
 labels: [triage, enhancement]
 body:
@@ -11,7 +11,7 @@ body:
       options:
         - label: I'm reporting a feature request
           required: true
-        - label: I've verified that I'm running yt-dlp version **2021.11.10.1**. ([update instructions](https://github.com/yt-dlp/yt-dlp#update))
+        - label: I've verified that I'm running yt-dlp version **2021.12.25**. ([update instructions](https://github.com/yt-dlp/yt-dlp#update))
           required: true
         - label: I've searched the [bugtracker](https://github.com/yt-dlp/yt-dlp/issues?q=) for similar issues including closed ones. DO NOT post duplicates
           required: true
```
**.github/ISSUE_TEMPLATE/6_question.yml** (vendored, 26 changes)

```diff
@@ -9,7 +9,7 @@ body:
       description: |
         Carefully read and work through this check list in order to prevent the most common mistakes and misuse of yt-dlp:
       options:
-        - label: I'm asking a question and not reporting a bug/feature request
+        - label: I'm asking a question and **not** reporting a bug/feature request
           required: true
         - label: I've looked through the [README](https://github.com/yt-dlp/yt-dlp#readme)
           required: true
@@ -24,7 +24,29 @@ body:
       description: |
         Ask your question in an arbitrary form.
         Please make sure it's worded well enough to be understood, see [is-the-description-of-the-issue-itself-sufficient](https://github.com/ytdl-org/youtube-dl#is-the-description-of-the-issue-itself-sufficient).
-        Provide any additional information and as much context and examples as possible
+        Provide any additional information and as much context and examples as possible.
+        If your question contains "isn't working" or "can you add", this is most likely the wrong template
       placeholder: WRITE QUESTION HERE
     validations:
       required: true
+  - type: textarea
+    id: log
+    attributes:
+      label: Verbose log
+      description: |
+        If your question involes a yt-dlp command, provide the complete verbose output of that command.
+        Add the `-Uv` flag to **your** command line you run yt-dlp with (`yt-dlp -Uv <your command line>`), copy the WHOLE output and insert it below.
+        It should look similar to this:
+      placeholder: |
+        [debug] Command-line config: ['-Uv', 'http://www.youtube.com/watch?v=BaW_jenozKc']
+        [debug] Portable config file: yt-dlp.conf
+        [debug] Portable config: ['-i']
+        [debug] Encodings: locale cp1252, fs utf-8, stdout utf-8, stderr utf-8, pref cp1252
+        [debug] yt-dlp version 2021.12.01 (exe)
+        [debug] Python version 3.8.8 (CPython 64bit) - Windows-10-10.0.19041-SP0
+        [debug] exe versions: ffmpeg 3.0.1, ffprobe 3.0.1
+        [debug] Optional libraries: Cryptodome, keyring, mutagen, sqlite, websockets
+        [debug] Proxy map: {}
+        yt-dlp is up to date (2021.12.01)
+        <more lines>
+      render: shell
```
**.github/ISSUE_TEMPLATE/config.yml** (vendored, 2 changes)

```diff
@@ -2,4 +2,4 @@ blank_issues_enabled: false
 contact_links:
   - name: Get help from the community on Discord
     url: https://discord.gg/H5MNcFW63r
-    about: Join the yt-dlp Discord for community-powered support!
+    about: Join the yt-dlp Discord for community-powered support!
```
**.github/ISSUE_TEMPLATE_tmpl/1_broken_site.yml** (vendored)

```diff
@@ -1,6 +1,6 @@
 name: Broken site support
 description: Report broken or misfunctioning site
-labels: [triage, extractor-bug]
+labels: [triage, site-bug]
 body:
   - type: checkboxes
     id: checklist
```
**.github/ISSUE_TEMPLATE_tmpl/2_site_support_request.yml** (vendored)

```diff
@@ -34,7 +34,7 @@ body:
       label: Example URLs
       description: |
         Provide all kinds of example URLs for which support should be added
-      value: |
+      placeholder: |
         - Single video: https://www.youtube.com/watch?v=BaW_jenozKc
         - Single video: https://youtu.be/BaW_jenozKc
         - Playlist: https://www.youtube.com/playlist?list=PL4lCao7KL_QFVb7Iudeipvc2BCavECqzc
```
**.github/ISSUE_TEMPLATE_tmpl/3_site_feature_request.yml** (vendored)

```diff
@@ -1,5 +1,5 @@
 name: Site feature request
-description: Request a new functionality for a site
+description: Request a new functionality for a supported site
 labels: [triage, site-enhancement]
 body:
   - type: checkboxes
@@ -47,3 +47,26 @@ body:
       placeholder: WRITE DESCRIPTION HERE
     validations:
       required: true
+  - type: textarea
+    id: log
+    attributes:
+      label: Verbose log
+      description: |
+        Provide the complete verbose output of yt-dlp that demonstrates the need for the enhancement.
+        Add the `-Uv` flag to your command line you run yt-dlp with (`yt-dlp -Uv <your command line>`), copy the WHOLE output and insert it below.
+        It should look similar to this:
+      placeholder: |
+        [debug] Command-line config: ['-Uv', 'http://www.youtube.com/watch?v=BaW_jenozKc']
+        [debug] Portable config file: yt-dlp.conf
+        [debug] Portable config: ['-i']
+        [debug] Encodings: locale cp1252, fs utf-8, stdout utf-8, stderr utf-8, pref cp1252
+        [debug] yt-dlp version %(version)s (exe)
+        [debug] Python version 3.8.8 (CPython 64bit) - Windows-10-10.0.19041-SP0
+        [debug] exe versions: ffmpeg 3.0.1, ffprobe 3.0.1
+        [debug] Optional libraries: Cryptodome, keyring, mutagen, sqlite, websockets
+        [debug] Proxy map: {}
+        yt-dlp is up to date (%(version)s)
+        <more lines>
+      render: shell
+    validations:
+      required: true
```
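The `%(version)s` fields in the `_tmpl` file above are ordinary Python percent-format placeholders that get substituted when the concrete issue forms are generated (the build workflow below runs this via `make issuetemplates`). A minimal sketch of how such a substitution can work; the function name and the command-line handling are illustrative, not the exact devscript:

```python
# Sketch: render an ISSUE_TEMPLATE_tmpl file into a concrete issue form by
# filling the %(version)s placeholders with printf-style interpolation.
import sys


def render_template(tmpl_path, out_path, version):
    with open(tmpl_path, encoding='utf-8') as f:
        template = f.read()
    # '%(version)s' in the template is standard Python %-formatting
    with open(out_path, 'w', encoding='utf-8') as f:
        f.write(template % {'version': version})


if __name__ == '__main__':
    render_template(
        '.github/ISSUE_TEMPLATE_tmpl/3_site_feature_request.yml',
        '.github/ISSUE_TEMPLATE/3_site_feature_request.yml',
        sys.argv[1] if len(sys.argv) > 1 else '2021.12.25')
```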
**.github/ISSUE_TEMPLATE_tmpl/4_bug_report.yml** (vendored, 2 changes)

```diff
@@ -1,6 +1,6 @@
 name: Bug report
 description: Report a bug unrelated to any particular site or extractor
-labels: [triage,bug]
+labels: [triage, bug]
 body:
   - type: checkboxes
     id: checklist
```
|
||||
name: Feature request request
|
||||
name: Feature request
|
||||
description: Request a new functionality unrelated to any particular site or extractor
|
||||
labels: [triage, enhancement]
|
||||
body:
|
||||
|
||||
**.github/ISSUE_TEMPLATE_tmpl/6_question.yml** (vendored, 26 changes)

```diff
@@ -9,7 +9,7 @@ body:
       description: |
         Carefully read and work through this check list in order to prevent the most common mistakes and misuse of yt-dlp:
       options:
-        - label: I'm asking a question and not reporting a bug/feature request
+        - label: I'm asking a question and **not** reporting a bug/feature request
           required: true
         - label: I've looked through the [README](https://github.com/yt-dlp/yt-dlp#readme)
           required: true
@@ -24,7 +24,29 @@ body:
       description: |
         Ask your question in an arbitrary form.
         Please make sure it's worded well enough to be understood, see [is-the-description-of-the-issue-itself-sufficient](https://github.com/ytdl-org/youtube-dl#is-the-description-of-the-issue-itself-sufficient).
-        Provide any additional information and as much context and examples as possible
+        Provide any additional information and as much context and examples as possible.
+        If your question contains "isn't working" or "can you add", this is most likely the wrong template
       placeholder: WRITE QUESTION HERE
     validations:
       required: true
+  - type: textarea
+    id: log
+    attributes:
+      label: Verbose log
+      description: |
+        If your question involes a yt-dlp command, provide the complete verbose output of that command.
+        Add the `-Uv` flag to **your** command line you run yt-dlp with (`yt-dlp -Uv <your command line>`), copy the WHOLE output and insert it below.
+        It should look similar to this:
+      placeholder: |
+        [debug] Command-line config: ['-Uv', 'http://www.youtube.com/watch?v=BaW_jenozKc']
+        [debug] Portable config file: yt-dlp.conf
+        [debug] Portable config: ['-i']
+        [debug] Encodings: locale cp1252, fs utf-8, stdout utf-8, stderr utf-8, pref cp1252
+        [debug] yt-dlp version 2021.12.01 (exe)
+        [debug] Python version 3.8.8 (CPython 64bit) - Windows-10-10.0.19041-SP0
+        [debug] exe versions: ffmpeg 3.0.1, ffprobe 3.0.1
+        [debug] Optional libraries: Cryptodome, keyring, mutagen, sqlite, websockets
+        [debug] Proxy map: {}
+        yt-dlp is up to date (2021.12.01)
+        <more lines>
+      render: shell
```
**.github/workflows/build.yml** (vendored, 60 changes)

```diff
@@ -1,14 +1,11 @@
 name: Build
 
-on:
-  push:
-    branches:
-      - release
+on: workflow_dispatch
 
 jobs:
   build_unix:
     runs-on: ubuntu-latest
     outputs:
+      version_suffix: ${{ steps.version_suffix.outputs.version_suffix }}
       ytdlp_version: ${{ steps.bump_version.outputs.ytdlp_version }}
       upload_url: ${{ steps.create_release.outputs.upload_url }}
       sha256_bin: ${{ steps.sha256_bin.outputs.sha256_bin }}
@@ -26,23 +23,32 @@ jobs:
         python-version: '3.8'
     - name: Install packages
       run: sudo apt-get -y install zip pandoc man
+    - name: Set version suffix
+      id: version_suffix
+      env:
+        PUSH_VERSION_COMMIT: ${{ secrets.PUSH_VERSION_COMMIT }}
+      if: "env.PUSH_VERSION_COMMIT == ''"
+      run: echo ::set-output name=version_suffix::$(date -u +"%H%M%S")
    - name: Bump version
      id: bump_version
      run: |
-        python devscripts/update-version.py
+        python devscripts/update-version.py ${{ steps.version_suffix.outputs.version_suffix }}
        make issuetemplates
    - name: Print version
      run: echo "${{ steps.bump_version.outputs.ytdlp_version }}"
-    - name: Update master
-      id: push_update
+    - name: Push to release
+      id: push_release
      run: |
-        git config --global user.email "${{ github.event.pusher.email }}"
-        git config --global user.name "${{ github.event.pusher.name }}"
+        git config --global user.name github-actions
+        git config --global user.email github-actions@example.com
        git add -u
-        git commit -m "[version] update" -m ":ci skip all"
-        git pull --rebase origin ${{ github.event.repository.master_branch }}
-        git push origin ${{ github.event.ref }}:${{ github.event.repository.master_branch }}
+        git commit -m "[version] update" -m "Created by: ${{ github.event.sender.login }}" -m ":ci skip all"
+        git push origin --force ${{ github.event.ref }}:release
        echo ::set-output name=head_sha::$(git rev-parse HEAD)
+    - name: Update master
+      id: push_master
+      env:
+        PUSH_VERSION_COMMIT: ${{ secrets.PUSH_VERSION_COMMIT }}
+      if: "env.PUSH_VERSION_COMMIT != ''"
+      run: git push origin ${{ github.event.ref }}
    - name: Get Changelog
      id: get_changelog
      run: |
@@ -113,7 +119,7 @@ jobs:
      with:
        tag_name: ${{ steps.bump_version.outputs.ytdlp_version }}
        release_name: yt-dlp ${{ steps.bump_version.outputs.ytdlp_version }}
-        commitish: ${{ steps.push_update.outputs.head_sha }}
+        commitish: ${{ steps.push_release.outputs.head_sha }}
        body: |
          #### [A description of the various files]((https://github.com/yt-dlp/yt-dlp#release-files)) are in the README
@@ -146,7 +152,6 @@ jobs:
   build_macos:
     runs-on: macos-11
     needs: build_unix
-    if: False
     outputs:
       sha256_macos: ${{ steps.sha256_macos.outputs.sha256_macos }}
       sha512_macos: ${{ steps.sha512_macos.outputs.sha512_macos }}
@@ -156,10 +161,11 @@ jobs:
     steps:
     - uses: actions/checkout@v2
     # In order to create a universal2 application, the version of python3 in /usr/bin has to be used
+    # Pyinstaller is pinned to 4.5.1 because the builds are failing in 4.6, 4.7
     - name: Install Requirements
       run: |
         brew install coreutils
-        /usr/bin/python3 -m pip install -U --user pip Pyinstaller mutagen pycryptodomex websockets
+        /usr/bin/python3 -m pip install -U --user pip Pyinstaller==4.5.1 mutagen pycryptodomex websockets
     - name: Bump version
       id: bump_version
       run: /usr/bin/python3 devscripts/update-version.py
@@ -233,7 +239,9 @@ jobs:
         pip install "https://yt-dlp.github.io/Pyinstaller-Builds/x86_64/pyinstaller-4.5.1-py3-none-any.whl" mutagen pycryptodomex websockets
     - name: Bump version
       id: bump_version
-      run: python devscripts/update-version.py
+      env:
+        version_suffix: ${{ needs.build_unix.outputs.version_suffix }}
+      run: python devscripts/update-version.py ${{ env.version_suffix }}
     - name: Build lazy extractors
       id: lazy_extractors
       run: python devscripts/make_lazy_extractors.py
@@ -320,7 +328,9 @@ jobs:
         pip install "https://yt-dlp.github.io/Pyinstaller-Builds/i686/pyinstaller-4.5.1-py3-none-any.whl" mutagen pycryptodomex websockets
     - name: Bump version
       id: bump_version
-      run: python devscripts/update-version.py
+      env:
+        version_suffix: ${{ needs.build_unix.outputs.version_suffix }}
+      run: python devscripts/update-version.py ${{ env.version_suffix }}
     - name: Build lazy extractors
       id: lazy_extractors
       run: python devscripts/make_lazy_extractors.py
@@ -345,7 +355,7 @@ jobs:
 
   finish:
     runs-on: ubuntu-latest
-    needs: [build_unix, build_windows, build_windows32]
+    needs: [build_unix, build_windows, build_windows32, build_macos]
 
     steps:
     - name: Make SHA2-256SUMS file
@@ -365,8 +375,8 @@ jobs:
         echo "${{ env.SHA256_PY2EXE }} yt-dlp_min.exe" >> SHA2-256SUMS
         echo "${{ env.SHA256_WIN32 }} yt-dlp_x86.exe" >> SHA2-256SUMS
         echo "${{ env.SHA256_WIN_ZIP }} yt-dlp_win.zip" >> SHA2-256SUMS
-        # echo "${{ env.SHA256_MACOS }} yt-dlp_macos" >> SHA2-256SUMS
-        # echo "${{ env.SHA256_MACOS_ZIP }} yt-dlp_macos.zip" >> SHA2-256SUMS
+        echo "${{ env.SHA256_MACOS }} yt-dlp_macos" >> SHA2-256SUMS
+        echo "${{ env.SHA256_MACOS_ZIP }} yt-dlp_macos.zip" >> SHA2-256SUMS
     - name: Upload 256SUMS file
       id: upload-sums
       uses: actions/upload-release-asset@v1
@@ -394,8 +404,8 @@ jobs:
         echo "${{ env.SHA512_WIN_ZIP }} yt-dlp_win.zip" >> SHA2-512SUMS
         echo "${{ env.SHA512_PY2EXE }} yt-dlp_min.exe" >> SHA2-512SUMS
         echo "${{ env.SHA512_WIN32 }} yt-dlp_x86.exe" >> SHA2-512SUMS
-        # echo "${{ env.SHA512_MACOS }} yt-dlp_macos" >> SHA2-512SUMS
-        # echo "${{ env.SHA512_MACOS_ZIP }} yt-dlp_macos.zip" >> SHA2-512SUMS
+        echo "${{ env.SHA512_MACOS }} yt-dlp_macos" >> SHA2-512SUMS
+        echo "${{ env.SHA512_MACOS_ZIP }} yt-dlp_macos.zip" >> SHA2-512SUMS
     - name: Upload 512SUMS file
       id: upload-512sums
       uses: actions/upload-release-asset@v1
```
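The `Set version suffix` step above emits an `HHMMSS` suffix (only when `PUSH_VERSION_COMMIT` is unset) that the `Bump version` steps pass to `devscripts/update-version.py`. A minimal sketch of the version string such a script can derive, assuming CalVer from the UTC date; the real devscript does more (for example, writing the result into `yt_dlp/version.py`):

```python
# Sketch: build a yt-dlp-style CalVer string, optionally with the HHMMSS
# suffix the workflow computes via `date -u +"%H%M%S"`.
import sys
from datetime import datetime, timezone


def make_version(suffix=''):
    # Zero-padded year.month.day, e.g. '2021.12.01'
    version = datetime.now(timezone.utc).strftime('%Y.%m.%d')
    return f'{version}.{suffix}' if suffix else version


if __name__ == '__main__':
    print(make_version(sys.argv[1] if len(sys.argv) > 1 else ''))
```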
**.gitignore** (vendored, 69 changes)

```diff
@@ -1,48 +1,54 @@
 # Config
 *.conf
-*.spec
 cookies
 *cookies.txt
 .netrc
 
 # Downloaded
-*.srt
-*.ttml
-*.sbv
-*.vtt
-*.flv
-*.mp4
-*.m4a
-*.m4v
-*.mp3
-*.3gp
-*.webm
-*.wav
-*.ape
-*.mkv
-*.flac
-*.avi
-*.swf
-*.part
-*.part-*
-*.ytdl
-*.annotations.xml
 *.aria2
-*.description
 *.dump
 *.frag
 *.frag.aria2
 *.frag.urls
-*.aria2
-*.swp
-*.ogg
-*.opus
 *.info.json
 *.live_chat.json
-*.jpg
-*.jpeg
-*.png
-*.webp
+*.annotations.xml
+*.description
+*.part*
+*.unknown_video
+*.ytdl
+.cache/
+
+*.3gp
+*.ape
+*.avi
+*.desktop
+*.flac
+*.flv
+*.jpeg
+*.jpg
+*.m4a
+*.m4v
+*.mhtml
+*.mkv
+*.mov
+*.mp3
+*.mp4
+*.ogg
+*.opus
+*.png
+*.sbv
+*.srt
+*.swf
+*.swp
+*.ttml
+*.url
+*.vtt
+*.wav
+*.webloc
+*.webm
+*.webp
 
 # Allow config/media files in testdata
 !test/**
@@ -80,7 +86,6 @@ README.txt
 *.1
 *.bash-completion
 *.fish
-*.exe
 *.tar.gz
 *.zsh
 *.spec
```
**CONTRIBUTING.md**

````diff
@@ -10,6 +10,7 @@ # CONTRIBUTING TO YT-DLP
 - [Does the issue involve one problem, and one problem only?](#does-the-issue-involve-one-problem-and-one-problem-only)
 - [Is anyone going to need the feature?](#is-anyone-going-to-need-the-feature)
 - [Is your question about yt-dlp?](#is-your-question-about-yt-dlp)
+- [Are you willing to share account details if needed?](#are-you-willing-to-share-account-details-if-needed)
 - [DEVELOPER INSTRUCTIONS](#developer-instructions)
 - [Adding new feature or making overarching changes](#adding-new-feature-or-making-overarching-changes)
 - [Adding support for a new site](#adding-support-for-a-new-site)
@@ -209,13 +210,13 @@ ## Adding support for a new site
    ```
 1. Add an import in [`yt_dlp/extractor/extractors.py`](yt_dlp/extractor/extractors.py).
 1. Run `python test/test_download.py TestDownload.test_YourExtractor`. This *should fail* at first, but you can continually re-run it until you're done. If you decide to add more than one test, the tests will then be named `TestDownload.test_YourExtractor`, `TestDownload.test_YourExtractor_1`, `TestDownload.test_YourExtractor_2`, etc. Note that tests with `only_matching` key in test's dict are not counted in. You can also run all the tests in one go with `TestDownload.test_YourExtractor_all`
-1. Make sure you have atleast one test for your extractor. Even if all videos covered by the extractor are expected to be inaccessible for automated testing, tests should still be added with a `skip` parameter indicating why the purticular test is disabled from running.
+1. Make sure you have atleast one test for your extractor. Even if all videos covered by the extractor are expected to be inaccessible for automated testing, tests should still be added with a `skip` parameter indicating why the particular test is disabled from running.
 1. Have a look at [`yt_dlp/extractor/common.py`](yt_dlp/extractor/common.py) for possible helper methods and a [detailed description of what your extractor should and may return](yt_dlp/extractor/common.py#L91-L426). Add tests and code for as many as you want.
 1. Make sure your code follows [yt-dlp coding conventions](#yt-dlp-coding-conventions) and check the code with [flake8](https://flake8.pycqa.org/en/latest/index.html#quickstart):
 
        $ flake8 yt_dlp/extractor/yourextractor.py
 
-1. Make sure your code works under all [Python](https://www.python.org/) versions supported by yt-dlp, namely CPython and PyPy for Python 3.6 and above. Backward compatability is not required for even older versions of Python.
+1. Make sure your code works under all [Python](https://www.python.org/) versions supported by yt-dlp, namely CPython and PyPy for Python 3.6 and above. Backward compatibility is not required for even older versions of Python.
 1. When the tests pass, [add](https://git-scm.com/docs/git-add) the new files, [commit](https://git-scm.com/docs/git-commit) them and [push](https://git-scm.com/docs/git-push) the result, like this:
 
        $ git add yt_dlp/extractor/extractors.py
@@ -227,6 +228,13 @@ ## Adding support for a new site
 
 In any case, thank you very much for your contributions!
 
+**Tip:** To test extractors that require login information, create a file `test/local_parameters.json` and add `"usenetrc": true` or your username and password in it:
+```json
+{
+    "username": "your user name",
+    "password": "your password"
+}
+```
+
 ## yt-dlp coding conventions
 
@@ -243,7 +251,7 @@ ### Mandatory and optional metafields
 - `title` (media title)
 - `url` (media download URL) or `formats`
 
-The aforementioned metafields are the critical data that the extraction does not make any sense without and if any of them fail to be extracted then the extractor is considered completely broken. While, in fact, only `id` is technically mandatory, due to compatability reasons, yt-dlp also treats `title` as mandatory. The extractor is allowed to return the info dict without url or formats in some special cases if it allows the user to extract usefull information with `--ignore-no-formats-error` - Eg: when the video is a live stream that has not started yet.
+The aforementioned metafields are the critical data that the extraction does not make any sense without and if any of them fail to be extracted then the extractor is considered completely broken. While, in fact, only `id` is technically mandatory, due to compatibility reasons, yt-dlp also treats `title` as mandatory. The extractor is allowed to return the info dict without url or formats in some special cases if it allows the user to extract usefull information with `--ignore-no-formats-error` - Eg: when the video is a live stream that has not started yet.
 
 [Any field](yt_dlp/extractor/common.py#219-L426) apart from the aforementioned ones are considered **optional**. That means that extraction should be **tolerant** to situations when sources for these fields can potentially be unavailable (even if they are always available at the moment) and **future-proof** in order not to break the extraction of general purpose mandatory fields.
````
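For context on the extractor workflow this diff refers to, CONTRIBUTING.md builds on a skeleton along the lines of the condensed sketch below, which lives in `yt_dlp/extractor/yourextractor.py` and uses the `InfoExtractor` helpers from `yt_dlp/extractor/common.py`. The site name, URLs and regexes are placeholders:

```python
# Condensed sketch of the new-extractor skeleton described in CONTRIBUTING.md.
# Returns the mandatory `id` and `title` fields; everything else is optional.
from .common import InfoExtractor


class YourExtractorIE(InfoExtractor):
    _VALID_URL = r'https?://(?:www\.)?yourextractor\.com/watch/(?P<id>[0-9]+)'
    _TESTS = [{
        'url': 'https://yourextractor.com/watch/42',
        'md5': 'TODO: md5 sum of the first 10241 bytes of the video file',
        'info_dict': {
            'id': '42',
            'ext': 'mp4',
            'title': 'Video title goes here',
        },
    }]

    def _real_extract(self, url):
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)
        return {
            'id': video_id,
            # `title` is treated as mandatory alongside `id`
            'title': self._html_search_regex(r'<h1>(.+?)</h1>', webpage, 'title'),
            'description': self._og_search_description(webpage, default=None),
        }
```

After adding the import in `yt_dlp/extractor/extractors.py`, the test named after the class can be run with `python test/test_download.py TestDownload.test_YourExtractor`, as the diff above describes.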
**CONTRIBUTORS** (38 changes)

```diff
@@ -139,3 +139,41 @@ rhendric
 sdomi
 selfisekai
 stanoarn
+0xA7404A/Aurora
+4a1e2y5
+aarubui
+chio0hai
+cntrl-s
+Deer-Spangle
+DEvmIb
+Grabien
+j54vc1bk
+mpeter50
+mrpapersonic
+pabs3
+staubichsauger
+xenova
+Yakabuff
+zulaport
+ehoogeveen-medweb
+PilzAdam
+zmousm
+iw0nderhow
+unit193
+TwoThousandHedgehogs
+Jertzukka
+cypheron
+Hyeeji
+bwildenhain
+C0D3D3V
+kebianizao
+Lapin0t
+abdullah-if
+DavidSkrundz
+mkubecek
+raleeper
+YuenSzeHong
+Sematre
+jaller94
+r5d
+julien-hadleyjack
```
**Changelog.md** (319 changes)

```diff
@@ -5,15 +5,216 @@ # Instuctions for creating release
 
 * Run `make doc`
 * Update Changelog.md and CONTRIBUTORS
-* Change "Merged with ytdl" version in Readme.md if needed
-* Add new/fixed extractors in "new features" section of Readme.md
-* Commit as `Release <version>`
-* Push to origin/release using `git push origin master:release`
-    build task will now run
+* Change "Based on ytdl" version in Readme.md if needed
+* Commit as `Release <version>` and push to master
+* Dispatch the workflow https://github.com/yt-dlp/yt-dlp/actions/workflows/build.yml on master
 -->
 
 
+### 2021.12.25
+
+* [dash,youtube] **Download live from start to end** by [nao20010128nao](https://github.com/nao20010128nao), [pukkandan](https://github.com/pukkandan)
+    * Add option `--live-from-start` to enable downloading live videos from start
+    * Add key `is_from_start` in formats to identify formats (of live videos) that downloads from start
+    * [dash] Create protocol `http_dash_segments_generator` that allows a function to be passed instead of fragments
+    * [fragment] Allow multiple live dash formats to download simultaneously
+    * [youtube] Implement fragment re-fetching for the live dash formats
+    * [youtube] Re-extract dash manifest every 5 hours (manifest expires in 6hrs)
+    * [postprocessor/ffmpeg] Add `FFmpegFixupDuplicateMoovPP` to fixup duplicated moov atoms
+    * Known issues:
+        * Ctrl+C doesn't work on Windows when downloading multiple formats
+        * If video becomes private, download hangs
+* [SponsorBlock] Add `Filler` and `Highlight` categories by [nihil-admirari](https://github.com/nihil-admirari), [pukkandan](https://github.com/pukkandan)
+    * Change `--sponsorblock-cut all` to `--sponsorblock-cut default` if you do not want filler sections to be removed
+* Add field `webpage_url_domain`
+* Add interactive format selection with `-f -`
+* Add option `--file-access-retries` by [ehoogeveen-medweb](https://github.com/ehoogeveen-medweb)
+* [outtmpl] Add alternate forms `S`, `D` and improve `id` detection
+* [outtmpl] Add operator `&` for replacement text by [PilzAdam](https://github.com/PilzAdam)
+* [EmbedSubtitle] Disable duration check temporarily
+* [extractor] Add `_search_nuxt_data` by [nao20010128nao](https://github.com/nao20010128nao)
+* [extractor] Ignore errors in comment extraction when `-i` is given
+* [extractor] Standardize `_live_title`
+* [FormatSort] Prevent incorrect deprecation warning
+* [generic] Extract m3u8 formats from JSON-LD
+* [postprocessor/ffmpeg] Always add `faststart`
+* [utils] Fix parsing `YYYYMMDD` dates in Nov/Dec by [wlritchi](https://github.com/wlritchi)
+* [utils] Improve `parse_count`
+* [utils] Update `std_headers` by [kikuyan](https://github.com/kikuyan), [fstirlitz](https://github.com/fstirlitz)
+* [lazy_extractors] Fix for search IEs
+* [extractor] Support default implicit graph in JSON-LD by [zmousm](https://github.com/zmousm)
+* Allow `--no-write-thumbnail` to override `--write-all-thumbnail`
+* Fix `--throttled-rate`
+* Fix control characters being printed to `--console-title`
+* Fix PostProcessor hooks not registered for some PPs
+* Pre-process when using `--flat-playlist`
+* Remove known invalid thumbnails from `info_dict`
+* Add warning when using `-f best`
+* Use `parse_duration` for `--wait-for-video` and some minor fix
+* [test/download] Add more fields
+* [test/download] Ignore field `webpage_url_domain` by [std-move](https://github.com/std-move)
+* [compat] Suppress errors in enabling VT mode
+* [docs] Improve manpage format by [iw0nderhow](https://github.com/iw0nderhow), [pukkandan](https://github.com/pukkandan)
+* [docs,cleanup] Minor fixes and cleanup
+* [cleanup] Fix some typos by [unit193](https://github.com/unit193)
+* [ABC:iview] Add show extractor by [pabs3](https://github.com/pabs3)
+* [dropout] Add extractor by [TwoThousandHedgehogs](https://github.com/TwoThousandHedgehogs), [pukkandan](https://github.com/pukkandan)
+* [GameJolt] Add extractors by [MinePlayersPE](https://github.com/MinePlayersPE)
+* [gofile] Add extractor by [Jertzukka](https://github.com/Jertzukka), [Ashish0804](https://github.com/Ashish0804)
+* [hse] Add extractors by [cypheron](https://github.com/cypheron), [pukkandan](https://github.com/pukkandan)
+* [NateTV] Add NateIE and NateProgramIE by [Ashish0804](https://github.com/Ashish0804), [Hyeeji](https://github.com/Hyeeji)
+* [OpenCast] Add extractors by [bwildenhain](https://github.com/bwildenhain), [C0D3D3V](https://github.com/C0D3D3V)
+* [rtve] Add `RTVEAudioIE` by [kebianizao](https://github.com/kebianizao)
+* [Rutube] Add RutubeChannelIE by [Ashish0804](https://github.com/Ashish0804)
+* [skeb] Add extractor by [nao20010128nao](https://github.com/nao20010128nao)
+* [soundcloud] Add related tracks extractor by [Lapin0t](https://github.com/Lapin0t)
+* [toggo] Add extractor by [nyuszika7h](https://github.com/nyuszika7h)
+* [TrueID] Add extractor by [MinePlayersPE](https://github.com/MinePlayersPE)
+* [audiomack] Update album and song VALID_URL by [abdullah-if](https://github.com/abdullah-if), [dirkf](https://github.com/dirkf)
+* [CBC Gem] Extract 1080p formats by [DavidSkrundz](https://github.com/DavidSkrundz)
+* [ceskatelevize] Fetch iframe from nextJS data by [mkubecek](https://github.com/mkubecek)
+* [crackle] Look for non-DRM formats by [raleeper](https://github.com/raleeper)
+* [dplay] Temporary fix for `discoveryplus.com/it`
+* [DiscoveryPlusShowBaseIE] yield actual video id by [Ashish0804](https://github.com/Ashish0804)
+* [Facebook] Handle redirect URLs
+* [fujitv] Extract 1080p from `tv_android` m3u8 by [YuenSzeHong](https://github.com/YuenSzeHong)
+* [gronkh] Support new URL pattern by [Sematre](https://github.com/Sematre)
+* [instagram] Expand valid URL by [u-spec-png](https://github.com/u-spec-png)
+* [Instagram] Try bypassing login wall with embed page by [MinePlayersPE](https://github.com/MinePlayersPE)
+* [Jamendo] Fix use of `_VALID_URL_RE` by [jaller94](https://github.com/jaller94)
+* [LBRY] Support livestreams by [Ashish0804](https://github.com/Ashish0804), [pukkandan](https://github.com/pukkandan)
+* [NJPWWorld] Extract formats from m3u8 by [aarubui](https://github.com/aarubui)
+* [NovaEmbed] update player regex by [std-move](https://github.com/std-move)
+* [npr] Make SMIL extraction non-fatal by [r5d](https://github.com/r5d)
+* [ntvcojp] Extract NUXT data by [nao20010128nao](https://github.com/nao20010128nao)
+* [ok.ru] add mobile fallback by [nao20010128nao](https://github.com/nao20010128nao)
+* [olympics] Add uploader and cleanup by [u-spec-png](https://github.com/u-spec-png)
+* [ondemandkorea] Update `jw_config` regex by [julien-hadleyjack](https://github.com/julien-hadleyjack)
+* [PlutoTV] Expand `_VALID_URL`
+* [RaiNews] Fix extractor by [nixxo](https://github.com/nixxo)
+* [RCTIPlusSeries] Lazy extraction and video type selection by [MinePlayersPE](https://github.com/MinePlayersPE)
+* [redtube] Handle formats delivered inside a JSON by [dirkf](https://github.com/dirkf), [nixxo](https://github.com/nixxo)
+* [SonyLiv] Add OTP login support by [Ashish0804](https://github.com/Ashish0804)
+* [Steam] Fix extractor by [u-spec-png](https://github.com/u-spec-png)
+* [TikTok] Pass cookies to mobile API by [MinePlayersPE](https://github.com/MinePlayersPE)
+* [trovo] Fix inheritance of `TrovoChannelBaseIE`
+* [TVer] Extract better thumbnails by [YuenSzeHong](https://github.com/YuenSzeHong)
+* [vimeo] Extract chapters
+* [web.archive:youtube] Improve metadata extraction by [coletdjnz](https://github.com/coletdjnz)
+* [youtube:comments] Add more options for limiting number of comments extracted by [coletdjnz](https://github.com/coletdjnz)
+* [youtube:tab] Extract more metadata from feeds/channels/playlists by [coletdjnz](https://github.com/coletdjnz)
+* [youtube:tab] Extract video thumbnails from playlist by [coletdjnz](https://github.com/coletdjnz), [pukkandan](https://github.com/pukkandan)
+* [youtube:tab] Ignore query when redirecting channel to playlist and cleanup of related code Closes #2046
+* [youtube] Fix `ytsearchdate`
+* [zdf] Support videos with different ptmd location by [iw0nderhow](https://github.com/iw0nderhow)
+* [zee5] Support /episodes in URL
```
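As a usage sketch for the headline `--live-from-start` feature above, via the Python API rather than the CLI: treating `live_from_start` as the matching `params` key is an assumption here, and the video ID is a placeholder.

```python
# Hedged sketch: download a (YouTube/DASH) live stream from its beginning.
import yt_dlp

ydl_opts = {
    'live_from_start': True,  # assumed params equivalent of --live-from-start
}
with yt_dlp.YoutubeDL(ydl_opts) as ydl:
    ydl.download(['https://www.youtube.com/watch?v=EXAMPLE_LIVE_ID'])
```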
```diff
+
+
+### 2021.12.01
+
+* **Add option `--wait-for-video` to wait for scheduled streams**
+* Add option `--break-per-input` to apply --break-on... to each input URL
+* Add option `--embed-info-json` to embed info.json in mkv
+* Add compat-option `embed-metadata`
+* Allow using a custom format selector through API
+* [AES] Add ECB mode by [nao20010128nao](https://github.com/nao20010128nao)
+* [build] Fix MacOS Build
+* [build] Save Git HEAD at release alongside version info
+* [build] Use `workflow_dispatch` for release
+* [downloader/ffmpeg] Fix for direct videos inside mpd manifests
+* [downloader] Add colors to download progress
+* [EmbedSubtitles] Slightly relax duration check and related cleanup
+* [ExtractAudio] Fix conversion to `wav` and `vorbis`
+* [ExtractAudio] Support `alac`
+* [extractor] Extract `average_rating` from JSON-LD
+* [FixupM3u8] Fixup MPEG-TS in MP4 container
+* [generic] Support mpd manifests without extension by [shirt](https://github.com/shirt-dev)
+* [hls] Better FairPlay DRM detection by [nyuszika7h](https://github.com/nyuszika7h)
+* [jsinterp] Fix splice to handle float (for youtube js player f1ca6900)
+* [utils] Allow alignment in `render_table` and add tests
+* [utils] Fix `PagedList`
+* [utils] Fix error when copying `LazyList`
+* Clarify video/audio-only formats in -F
+* Ensure directory exists when checking formats
+* Ensure path for link files exists by [Zirro](https://github.com/Zirro)
+* Ensure same config file is not loaded multiple times
+* Fix `postprocessor_hooks`
+* Fix `--break-on-archive` when pre-checking
+* Fix `--check-formats` for `mhtml`
+* Fix `--load-info-json` of playlists with failed entries
+* Fix `--trim-filename` when filename has `.`
+* Fix bug in parsing `--add-header`
+* Fix error in `report_unplayable_conflict` by [shirt](https://github.com/shirt-dev)
+* Fix writing playlist infojson with `--no-clean-infojson`
+* Validate --get-bypass-country
+* [blogger] Add extractor by [pabs3](https://github.com/pabs3)
+* [breitbart] Add extractor by [Grabien](https://github.com/Grabien)
+* [CableAV] Add extractor by [j54vc1bk](https://github.com/j54vc1bk)
+* [CanalAlpha] Add extractor by [Ashish0804](https://github.com/Ashish0804)
+* [CozyTV] Add extractor by [Ashish0804](https://github.com/Ashish0804)
+* [CPTwentyFour] Add extractor by [Ashish0804](https://github.com/Ashish0804)
+* [DiscoveryPlus] Add `DiscoveryPlusItalyShowIE` by [Ashish0804](https://github.com/Ashish0804)
+* [ESPNCricInfo] Add extractor by [Ashish0804](https://github.com/Ashish0804)
+* [LinkedIn] Add extractor by [u-spec-png](https://github.com/u-spec-png)
+* [mixch] Add extractor by [nao20010128nao](https://github.com/nao20010128nao)
+* [nebula] Add `NebulaCollectionIE` and rewrite extractor by [hheimbuerger](https://github.com/hheimbuerger)
+* [OneFootball] Add extractor by [Ashish0804](https://github.com/Ashish0804)
+* [peer.tv] Add extractor by [u-spec-png](https://github.com/u-spec-png)
+* [radiozet] Add extractor by [0xA7404A](https://github.com/0xA7404A) (Aurora)
+* [redgifs] Add extractor by [chio0hai](https://github.com/chio0hai)
+* [RedGifs] Add Search and User extractors by [Deer-Spangle](https://github.com/Deer-Spangle)
+* [rtrfm] Add extractor by [pabs3](https://github.com/pabs3)
+* [Streamff] Add extractor by [cntrl-s](https://github.com/cntrl-s)
+* [Stripchat] Add extractor by [zulaport](https://github.com/zulaport)
+* [Aljazeera] Fix extractor by [u-spec-png](https://github.com/u-spec-png)
+* [AmazonStoreIE] Fix regex to not match vdp urls by [Ashish0804](https://github.com/Ashish0804)
+* [ARDBetaMediathek] Handle new URLs
+* [bbc] Get all available formats by [nyuszika7h](https://github.com/nyuszika7h)
+* [Bilibili] Fix title extraction by [u-spec-png](https://github.com/u-spec-png)
+* [CBC Gem] Fix for shows that don't have all seasons by [makeworld-the-better-one](https://github.com/makeworld-the-better-one)
+* [curiositystream] Add more metadata
+* [CuriosityStream] Fix series
+* [DiscoveryPlus] Rewrite extractors by [Ashish0804](https://github.com/Ashish0804), [pukkandan](https://github.com/pukkandan)
+* [HotStar] Set language field from tags by [Ashish0804](https://github.com/Ashish0804)
+* [instagram, cleanup] Refactor extractors
+* [Instagram] Display more login errors by [MinePlayersPE](https://github.com/MinePlayersPE)
+* [itv] Fix extractor by [staubichsauger](https://github.com/staubichsauger), [pukkandan](https://github.com/pukkandan)
+* [mediaklikk] Expand valid URL
+* [MTV] Improve mgid extraction by [Sipherdrakon](https://github.com/Sipherdrakon), [kikuyan](https://github.com/kikuyan)
+* [nexx] Better error message for unsupported format
+* [NovaEmbed] Fix extractor by [pukkandan](https://github.com/pukkandan), [std-move](https://github.com/std-move)
+* [PatreonUser] Do not capture RSS URLs
+* [Reddit] Add support for 1080p videos by [xenova](https://github.com/xenova)
+* [RoosterTeethSeries] Fix for multiple pages by [MinePlayersPE](https://github.com/MinePlayersPE)
+* [sbs] Fix for movies and livestreams
+* [Senate.gov] Add SenateGovIE and fix SenateISVPIE by [Grabien](https://github.com/Grabien), [pukkandan](https://github.com/pukkandan)
+* [soundcloud:search] Fix pagination
+* [tiktok:user] Set `webpage_url` correctly
+* [Tokentube] Fix description by [u-spec-png](https://github.com/u-spec-png)
+* [trovo] Fix extractor by [nyuszika7h](https://github.com/nyuszika7h)
+* [tv2] Expand valid URL
+* [Tvplayhome] Fix extractor by [pukkandan](https://github.com/pukkandan), [18928172992817182](https://github.com/18928172992817182)
+* [Twitch:vod] Add chapters by [mpeter50](https://github.com/mpeter50)
+* [twitch:vod] Extract live status by [DEvmIb](https://github.com/DEvmIb)
+* [VidLii] Add 720p support by [mrpapersonic](https://github.com/mrpapersonic)
+* [vimeo] Add fallback for config URL
+* [vimeo] Sort http formats higher
+* [WDR] Expand valid URL
+* [willow] Add extractor by [aarubui](https://github.com/aarubui)
+* [xvideos] Detect embed URLs by [4a1e2y5](https://github.com/4a1e2y5)
+* [xvideos] Fix extractor by [Yakabuff](https://github.com/Yakabuff)
+* [youtube, cleanup] Reorganize Tab and Search extractor inheritances
+* [youtube:search_url] Add playlist/channel support
+* [youtube] Add `default` player client by [coletdjnz](https://github.com/coletdjnz)
+* [youtube] Add storyboard formats
+* [youtube] Decrypt n-sig for URLs with `ratebypass`
+* [youtube] Minor improvement to format sorting
+* [cleanup] Add deprecation warnings
+* [cleanup] Refactor `JSInterpreter._seperate`
+* [Cleanup] Remove some unnecessary groups in regexes by [Ashish0804](https://github.com/Ashish0804)
+* [cleanup] Misc cleanup
 
 
 ### 2021.11.10.1
 
 * Temporarily disable MacOS Build
```
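The "Allow using a custom format selector through API" entry above means a callable can be passed as the `format` parameter instead of a format string. A sketch along the lines of the example that was added to the README around this release; the field names are the documented info-dict format keys:

```python
# Sketch: pick the best video plus a compatible best audio via a callable
# format selector, instead of a -f format string.
import yt_dlp

URLS = ['https://www.youtube.com/watch?v=BaW_jenozKc']


def format_selector(ctx):
    """Select the best video and the best audio that won't result in an mkv."""
    formats = ctx.get('formats')[::-1]  # formats are sorted worst to best
    best_video = next(f for f in formats
                      if f['vcodec'] != 'none' and f['acodec'] == 'none')
    audio_ext = {'mp4': 'm4a', 'webm': 'webm'}[best_video['ext']]
    best_audio = next(f for f in formats
                      if f['acodec'] != 'none' and f['vcodec'] == 'none'
                      and f['ext'] == audio_ext)
    yield {
        'format_id': f'{best_video["format_id"]}+{best_audio["format_id"]}',
        'ext': best_video['ext'],
        'requested_formats': [best_video, best_audio],
        'protocol': f'{best_video["protocol"]}+{best_audio["protocol"]}',
    }


with yt_dlp.YoutubeDL({'format': format_selector}) as ydl:
    ydl.download(URLS)
```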
```diff
@@ -40,7 +241,7 @@ ### 2021.11.10
 * [fragment] Fix progress display in fragmented downloads
 * [downloader/ffmpeg] Fix vtt download with ffmpeg
 * [ffmpeg] Detect presence of setts and libavformat version
-* [ExtractAudio] Rescale --audio-quality correctly by [CrypticSignal](https://github.com/CrypticSignal), [pukkandan](https://github.com/pukkandan)
+* [ExtractAudio] Rescale `--audio-quality` correctly by [CrypticSignal](https://github.com/CrypticSignal), [pukkandan](https://github.com/pukkandan)
 * [ExtractAudio] Use `libfdk_aac` if available by [CrypticSignal](https://github.com/CrypticSignal)
 * [FormatSort] `eac3` is better than `ac3`
 * [FormatSort] Fix some fields' defaults
@@ -1372,7 +1573,7 @@ ### 2021.01.05
 * Cleaned up the fork for public use
 
 
-**PS**: All uncredited changes above this point are authored by [pukkandan](https://github.com/pukkandan)
+**Note**: All uncredited changes above this point are authored by [pukkandan](https://github.com/pukkandan)
 
 ### Unreleased changes in [blackjack4494/yt-dlc](https://github.com/blackjack4494/yt-dlc)
 * Updated to youtube-dl release 2020.11.26 by [pukkandan](https://github.com/pukkandan)
```
```diff
@@ -1397,8 +1598,110 @@ ### Unreleased changes in [blackjack4494/yt-dlc](https://github.com/blackjack449
 * [spreaker] fix SpreakerShowIE test URL by [pukkandan](https://github.com/pukkandan)
 * [Vlive] Fix playlist handling when downloading a channel by [kyuyeunk](https://github.com/kyuyeunk)
+* [tmz] Fix extractor by [diegorodriguezv](https://github.com/diegorodriguezv)
+* [ITV] BTCC URL update by [WolfganP](https://github.com/WolfganP)
+* [generic] Detect embedded bitchute videos by [pukkandan](https://github.com/pukkandan)
+* [generic] Extract embedded youtube and twitter videos by [diegorodriguezv](https://github.com/diegorodriguezv)
+* [ffmpeg] Ensure all streams are copied by [pukkandan](https://github.com/pukkandan)
+* [embedthumbnail] Fix for os.rename error by [pukkandan](https://github.com/pukkandan)
+* make_win.bat: don't use UPX to pack vcruntime140.dll by [jbruchon](https://github.com/jbruchon)
 
 
 ### Changelog of [blackjack4494/yt-dlc](https://github.com/blackjack4494/yt-dlc) till release 2020.11.11-3
 
 **Note**: This was constructed from the merge commit messages and may not be entirely accurate
 
+* [bandcamp] fix failing test. remove subclass hack by [insaneracist](https://github.com/insaneracist)
+* [bandcamp] restore album downloads by [insaneracist](https://github.com/insaneracist)
+* [francetv] fix extractor by [Surkal](https://github.com/Surkal)
+* [gdcvault] fix extractor by [blackjack4494](https://github.com/blackjack4494)
+* [hotstar] Move to API v1 by [theincognito-inc](https://github.com/theincognito-inc)
+* [hrfernsehen] add extractor by [blocktrron](https://github.com/blocktrron)
+* [kakao] new apis by [blackjack4494](https://github.com/blackjack4494)
+* [la7] fix missing protocol by [nixxo](https://github.com/nixxo)
+* [mailru] removed escaped braces, use urljoin, added tests by [nixxo](https://github.com/nixxo)
+* [MTV/Nick] universal mgid extractor + fix nick.de feed by [blackjack4494](https://github.com/blackjack4494)
+* [mtv] Fix a missing match_id by [nixxo](https://github.com/nixxo)
+* [Mtv] updated extractor logic & more by [blackjack4494](https://github.com/blackjack4494)
+* [ndr] support Daserste ndr by [blackjack4494](https://github.com/blackjack4494)
+* [Netzkino] Only use video id to find metadata by [TobiX](https://github.com/TobiX)
+* [newgrounds] fix: video download by [insaneracist](https://github.com/insaneracist)
+* [nitter] Add new extractor by [B0pol](https://github.com/B0pol)
+* [soundcloud] Resolve audio/x-wav by [tfvlrue](https://github.com/tfvlrue)
+* [soundcloud] sets pattern and tests by [blackjack4494](https://github.com/blackjack4494)
+* [SouthparkDE/MTV] another mgid extraction (mtv_base) feed url updated by [blackjack4494](https://github.com/blackjack4494)
+* [StoryFire] Add new extractor by [sgstair](https://github.com/sgstair)
+* [twitch] by [geauxlo](https://github.com/geauxlo)
+* [videa] Adapt to updates by [adrianheine](https://github.com/adrianheine)
+* [Viki] subtitles, formats by [blackjack4494](https://github.com/blackjack4494)
+* [vlive] fix extractor for revamped website by [exwm](https://github.com/exwm)
+* [xtube] fix extractor by [insaneracist](https://github.com/insaneracist)
+* [youtube] Convert subs when download is skipped by [blackjack4494](https://github.com/blackjack4494)
+* [youtube] Fix age gate detection by [random-nick](https://github.com/random-nick)
+* [youtube] fix yt-only playback when age restricted/gated - requires cookies by [blackjack4494](https://github.com/blackjack4494)
+* [youtube] fix: extract artist metadata from ytInitialData by [insaneracist](https://github.com/insaneracist)
+* [youtube] fix: extract mix playlist ids from ytInitialData by [insaneracist](https://github.com/insaneracist)
+* [youtube] fix: mix playlist title by [insaneracist](https://github.com/insaneracist)
+* [youtube] fix: Youtube Music playlists by [insaneracist](https://github.com/insaneracist)
+* [Youtube] Fixed problem with new youtube player by [peet1993](https://github.com/peet1993)
+* [zoom] Fix url parsing for url's containing /share/ and dots by [Romern](https://github.com/Romern)
+* [zoom] new extractor by [insaneracist](https://github.com/insaneracist)
+* abc by [adrianheine](https://github.com/adrianheine)
+* Added Comcast_SSO fix by [merval](https://github.com/merval)
+* Added DRM logic to brightcove by [merval](https://github.com/merval)
+* Added regex for ABC.com site. by [kucksdorfs](https://github.com/kucksdorfs)
+* alura by [hugohaa](https://github.com/hugohaa)
+* Arbitrary merges by [fstirlitz](https://github.com/fstirlitz)
+* ard.py_add_playlist_support by [martin54](https://github.com/martin54)
+* Bugfix/youtube/chapters fix extractor by [gschizas](https://github.com/gschizas)
+* bugfix_youtube_like_extraction by [RedpointsBots](https://github.com/RedpointsBots)
+* Create build workflow by [blackjack4494](https://github.com/blackjack4494)
+* deezer by [LucBerge](https://github.com/LucBerge)
+* Detect embedded bitchute videos by [pukkandan](https://github.com/pukkandan)
+* Don't install tests by [l29ah](https://github.com/l29ah)
+* Don't try to embed/convert json subtitles generated by [youtube](https://github.com/youtube) livechat by [pukkandan](https://github.com/pukkandan)
+* Doodstream by [sxvghd](https://github.com/sxvghd)
+* duboku by [lkho](https://github.com/lkho)
+* elonet by [tpikonen](https://github.com/tpikonen)
+* ext/remuxe-video by [Zocker1999NET](https://github.com/Zocker1999NET)
+* fall-back to the old way to fetch subtitles, if needed by [RobinD42](https://github.com/RobinD42)
+* feature_subscriber_count by [RedpointsBots](https://github.com/RedpointsBots)
+* Fix external downloader when there is no http_header by [pukkandan](https://github.com/pukkandan)
+* Fix issue triggered by [tubeup](https://github.com/tubeup) by [nsapa](https://github.com/nsapa)
+* Fix YoutubePlaylistsIE by [ZenulAbidin](https://github.com/ZenulAbidin)
+* fix-mitele' by [DjMoren](https://github.com/DjMoren)
+* fix/google-drive-cookie-issue by [legraphista](https://github.com/legraphista)
+* fix_tiktok by [mervel-mervel](https://github.com/mervel-mervel)
+* Fixed problem with JS player URL by [peet1993](https://github.com/peet1993)
+* fixYTSearch by [xarantolus](https://github.com/xarantolus)
+* FliegendeWurst-3sat-zdf-merger-bugfix-feature
+* gilou-bandcamp_update
+* implement ThisVid extractor by [rigstot](https://github.com/rigstot)
+* JensTimmerman-patch-1 by [JensTimmerman](https://github.com/JensTimmerman)
+* Keep download archive in memory for better performance by [jbruchon](https://github.com/jbruchon)
+* la7-fix by [iamleot](https://github.com/iamleot)
+* magenta by [adrianheine](https://github.com/adrianheine)
+* Merge 26564 from [adrianheine](https://github.com/adrianheine)
+* Merge code from [ddland](https://github.com/ddland)
+* Merge code from [nixxo](https://github.com/nixxo)
+* Merge code from [ssaqua](https://github.com/ssaqua)
+* Merge code from [zubearc](https://github.com/zubearc)
+* mkvthumbnail by [MrDoritos](https://github.com/MrDoritos)
+* myvideo_ge by [fonkap](https://github.com/fonkap)
+* naver by [SeonjaeHyeon](https://github.com/SeonjaeHyeon)
+* ondemandkorea by [julien-hadleyjack](https://github.com/julien-hadleyjack)
+* rai-update by [iamleot](https://github.com/iamleot)
+* RFC: youtube: Polymer UI and JSON endpoints for playlists by [wlritchi](https://github.com/wlritchi)
+* rutv by [adrianheine](https://github.com/adrianheine)
+* Sc extractor web auth by [blackjack4494](https://github.com/blackjack4494)
+* Switch from binary search tree to Python sets by [jbruchon](https://github.com/jbruchon)
+* tiktok by [skyme5](https://github.com/skyme5)
+* tvnow by [TinyToweringTree](https://github.com/TinyToweringTree)
+* twitch-fix by [lel-amri](https://github.com/lel-amri)
+* Twitter shortener by [blackjack4494](https://github.com/blackjack4494)
+* Update README.md by [JensTimmerman](https://github.com/JensTimmerman)
+* Update to reflect website changes. by [amigatomte](https://github.com/amigatomte)
+* use webarchive to fix a dead link in README by [B0pol](https://github.com/B0pol)
+* Viki the second by [blackjack4494](https://github.com/blackjack4494)
```
|
||||
* wdr-subtitles by [mrtnmtth](https://github.com/mrtnmtth)
|
||||
* Webpfix by [alexmerkel](https://github.com/alexmerkel)
|
||||
* Youtube live chat by [siikamiika](https://github.com/siikamiika)
|
||||
|
||||
@@ -28,6 +28,7 @@ ## [coletdjnz](https://github.com/coletdjnz)
[![gh-sponsor](https://img.shields.io/badge/_-Sponsor-red.svg?logo=githubsponsors&labelColor=555555&style=for-the-badge)](https://github.com/sponsors/coletdjnz)

* YouTube improvements including: age-gate bypass, private playlists, multiple-clients (to avoid throttling) and a lot of under-the-hood improvements
* Added support for downloading YoutubeWebArchive videos
Makefile
@@ -13,11 +13,13 @@ pypi-files: AUTHORS Changelog.md LICENSE README.md README.txt supportedsites com
.PHONY: all clean install test tar pypi-files completions ot offlinetest codetest supportedsites

clean-test:
	rm -rf *.3gp *.annotations.xml *.ape *.avi *.description *.dump *.flac *.flv *.frag *.frag.aria2 *.frag.urls \
	*.info.json *.jpeg *.jpg *.live_chat.json *.m4a *.m4v *.mkv *.mp3 *.mp4 *.ogg *.opus *.part* *.png *.sbv *.srt \
	*.swf *.swp *.ttml *.vtt *.wav *.webm *.webp *.ytdl test/testdata/player-*.js
	rm -rf test/testdata/player-*.js tmp/ *.annotations.xml *.aria2 *.description *.dump *.frag \
	*.frag.aria2 *.frag.urls *.info.json *.live_chat.json *.part* *.unknown_video *.ytdl \
	*.3gp *.ape *.avi *.desktop *.flac *.flv *.jpeg *.jpg *.m4a *.m4v *.mhtml *.mkv *.mov *.mp3 \
	*.mp4 *.ogg *.opus *.png *.sbv *.srt *.swf *.swp *.ttml *.url *.vtt *.wav *.webloc *.webm *.webp
clean-dist:
	rm -rf yt-dlp.1.temp.md yt-dlp.1 README.txt MANIFEST build/ dist/ .coverage cover/ yt-dlp.tar.gz completions/ yt_dlp/extractor/lazy_extractors.py *.spec CONTRIBUTING.md.tmp yt-dlp yt-dlp.exe yt_dlp.egg-info/ AUTHORS .mailmap
	rm -rf yt-dlp.1.temp.md yt-dlp.1 README.txt MANIFEST build/ dist/ .coverage cover/ yt-dlp.tar.gz completions/ \
	yt_dlp/extractor/lazy_extractors.py *.spec CONTRIBUTING.md.tmp yt-dlp yt-dlp.exe yt_dlp.egg-info/ AUTHORS .mailmap
clean-cache:
	find . -name "*.pyc" -o -name "*.class" -delete

@@ -31,7 +33,6 @@ DESTDIR ?= .
BINDIR ?= $(PREFIX)/bin
MANDIR ?= $(PREFIX)/man
SHAREDIR ?= $(PREFIX)/share
# make_supportedsites.py doesnot work correctly in python2
PYTHON ?= /usr/bin/env python3

# set SYSCONFDIR to /etc if PREFIX=/usr or PREFIX=/usr/local
README.md
@@ -1,3 +1,4 @@
<!-- MANPAGE: BEGIN EXCLUDED SECTION -->
<div align="center">

[![YT-DLP](https://raw.githubusercontent.com/yt-dlp/yt-dlp/master/.github/banner.svg)](#readme)
@@ -15,9 +16,13 @@
[![PyPi](https://img.shields.io/badge/-PyPi-blue.svg?logo=pypi&labelColor=555555&style=for-the-badge)](https://pypi.org/project/yt-dlp)

</div>
<!-- MANPAGE: END EXCLUDED SECTION -->

yt-dlp is a [youtube-dl](https://github.com/ytdl-org/youtube-dl) fork based on the now inactive [youtube-dlc](https://github.com/blackjack4494/yt-dlc). The main focus of this project is adding new features and patches while also keeping up to date with the original project

<!-- MANPAGE: MOVE "USAGE AND OPTIONS" SECTION HERE -->

<!-- MANPAGE: BEGIN EXCLUDED SECTION -->
* [NEW FEATURES](#new-features)
* [Differences in default behavior](#differences-in-default-behavior)
* [INSTALLATION](#installation)
@@ -61,17 +66,17 @@
* [Opening an Issue](CONTRIBUTING.md#opening-an-issue)
* [Developer Instructions](CONTRIBUTING.md#developer-instructions)
* [MORE](#more)
<!-- MANPAGE: END EXCLUDED SECTION -->

# NEW FEATURES
The major new features from the latest release of [blackjack4494/yt-dlc](https://github.com/blackjack4494/yt-dlc) are:

* Based on **youtube-dl 2021.12.17 [commit/5014bd6](https://github.com/ytdl-org/youtube-dl/commit/5014bd67c22b421207b2650d4dc874b95b36dda1)** and **youtube-dlc 2020.11.11-3 [commit/f9401f2](https://github.com/blackjack4494/yt-dlc/commit/f9401f2a91987068139c5f757b12fc711d4c0cee)**: You get all the features and patches of [youtube-dlc](https://github.com/blackjack4494/yt-dlc) in addition to the latest [youtube-dl](https://github.com/ytdl-org/youtube-dl)

* **[SponsorBlock Integration](#sponsorblock-options)**: You can mark/remove sponsor sections in youtube videos by utilizing the [SponsorBlock](https://sponsor.ajay.app) API

* **[Format Sorting](#sorting-formats)**: The default format sorting options have been changed so that higher resolution and better codecs will be now preferred instead of simply using larger bitrate. Furthermore, you can now specify the sort order using `-S`. This allows for much easier format selection than what is possible by simply using `--format` ([examples](#format-selection-examples))

* **Merged with youtube-dl [commit/379f52a](https://github.com/ytdl-org/youtube-dl/commit/379f52a4954013767219d25099cce9e0f9401961)**: (v2021.06.06) You get all the latest features and patches of [youtube-dl](https://github.com/ytdl-org/youtube-dl) in addition to all the features of [youtube-dlc](https://github.com/blackjack4494/yt-dlc)

* **Merged with animelover1984/youtube-dl**: You get most of the features and improvements from [animelover1984/youtube-dl](https://github.com/animelover1984/youtube-dl) including `--write-comments`, `BiliBiliSearch`, `BilibiliChannel`, Embedding thumbnail in mp4/ogg/opus, playlist infojson etc. Note that the NicoNico improvements are not available. See [#31](https://github.com/yt-dlp/yt-dlp/pull/31) for details.

* **Youtube improvements**:
@@ -83,6 +88,7 @@ # NEW FEATURES
    * Redirect channel's home URL automatically to `/video` to preserve the old behaviour
    * `255kbps` audio is extracted (if available) from youtube music when premium cookies are given
    * Youtube music Albums, channels etc can be downloaded ([except self-uploaded music](https://github.com/yt-dlp/yt-dlp/issues/723))
    * Download livestreams from the start using `--live-from-start`

* **Cookies from browser**: Cookies can be automatically extracted from all major web browsers using `--cookies-from-browser BROWSER[:PROFILE]`

@@ -92,11 +98,7 @@ # NEW FEATURES

* **Aria2c with HLS/DASH**: You can use `aria2c` as the external downloader for DASH(mpd) and HLS(m3u8) formats

* **New extractors**: 17live, 3speak, amazonstore, animelab, audius, bandcampmusic, bannedvideo, biliintl, bitwave.tv, blackboardcollaborate, cam4, cgtn, chingari, ciscowebex, damtomo, discoveryplus.in, douyin, epicon, euscreen, fancode, filmmodu, gab, gedi, gettr, gopro, gotostage, gronkh, koo, manototv, mediaite, mediaklikk, mediasetshow, mediathek, microsoftstream, mildom, mirrativ, mlsscoccer, mtv.it, musescore, mxplayershow, n1, nebula, nfhsnetwork, novaplay, nzherald, olympics replay, on24, openrec, parlview-AU, peloton, planetmarathi, pluto.tv, polsatgo, polskieradio, pornflip, projectveritas, radiko, radiokapital, radlive, raiplayradio, rcs, rctiplus, saitosan, sciencechannel, shemaroome, skynews-AU, skynews-story, sovietscloset, startv, streamanity, telemundo, theta, theta, tokentube, tv2huseries, ukcolumn, utreon, veo, vidiolive, vidiopremier, voicy, vupload, whowatch, wim.tv, wppilot, youtube webarchive, zee5, zen.yandex

* **New playlist extractors**: bilibili categories, eroprofile albums, hotstar series, hungama albums, newgrounds user, niconico search/users, paramountplus series, patreon user, peertube playlist/channels, roosterteeth series, sonyliv series, tiktok user, trovo channels, voot series

* **Fixed/improved extractors**: 7plus, 9now, afreecatv, akamai, aljazeera, amcnetworks, animalplanet, archive.org, arte, atv, bbc, bilibili, bitchute, bravotv, camtube, cbc, cda, ceskatelevize, chingari, comedycentral, coub, crackle, crunchyroll, curiositystream, diynetwork, dw, eroprofile, facebook, francetv, funimation, globo, hearthisatie, hidive, hotstar, hungama, imdb, ina, instagram, iprima, itv, iwara, kakao, la7, linkedinlearning, linuxacadamy, mediaset, mediasite, motherless, mxplayer, nbcolympics, ndr, newgrounds, niconico, nitter, nova, nrk, nuvid, oreilly, paramountplus, parliamentlive, patreon, pbs, peertube, plutotv, polskieradio, pornhub, reddit, reddit, redtube, rmcdecouverte, roosterteeth, rtp, rumble, saml verizon login, skyit, sonyliv, soundcloud, southparkde, spankbang, spreaker, streamable, tagesschau, tbs, tennistv, tenplay, tiktok, tubi, tv2, tv2hu, tv5mondeplus, tvp, twitcasting, vh1, viafree, videa, vidio, vidme, viewlift, viki, vimeo, viu, vk, vlive, vrt, wakanim, xhamster, yahoo
* **New and fixed extractors**: Many new extractors have been added and a lot of existing ones have been fixed. See the [changelog](Changelog.md) or the [list of supported sites](supportedsites.md)

* **New MSOs**: Philo, Spectrum, SlingTV, Cablevision, RCN

@@ -108,35 +110,30 @@ # NEW FEATURES

* **Output template improvements**: Output templates can now have date-time formatting, numeric offsets, object traversal etc. See [output template](#output-template) for details. Even more advanced operations can also be done with the help of `--parse-metadata` and `--replace-in-metadata` (a short sketch follows this list)

* **Other new options**: `--print`, `--sleep-requests`, `--convert-thumbnails`, `--write-link`, `--force-download-archive`, `--force-overwrites`, `--break-on-reject` etc
* **Other new options**: Many new options have been added such as `--print`, `--wait-for-video`, `--sleep-requests`, `--convert-thumbnails`, `--write-link`, `--force-download-archive`, `--force-overwrites`, `--break-on-reject` etc

* **Improvements**: Regex and other operators in `--match-filter`, multiple `--postprocessor-args` and `--downloader-args`, faster archive checking, more [format selection options](#format-selection) etc
* **Improvements**: Regex and other operators in `--match-filter`, multiple `--postprocessor-args` and `--downloader-args`, faster archive checking, more [format selection options](#format-selection), merge multi-video/audio etc

* **Plugins**: Extractors and PostProcessors can be loaded from an external file. See [plugins](#plugins) for details

* **Self-updater**: The releases can be updated using `yt-dlp -U`
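
As a rough sketch of the output-template improvements mentioned in the list above (the URLs are placeholders; actual field values depend on the video):

```bash
# Date/time formatting: put the upload date into the file name, strftime-style
yt-dlp -o "%(upload_date>%Y-%m-%d)s - %(title)s.%(ext)s" "URL"

# Numeric offsets: start the playlist numbering at 011 instead of 001
yt-dlp -o "%(playlist_index+10)03d - %(title)s.%(ext)s" "PLAYLIST_URL"
```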

See [changelog](Changelog.md) or [commits](https://github.com/yt-dlp/yt-dlp/commits) for the full list of changes

**PS**: Some of these changes are already in youtube-dlc, but are still unreleased. See [this](Changelog.md#unreleased-changes-in-blackjack4494yt-dlc) for details

If you are coming from [youtube-dl](https://github.com/ytdl-org/youtube-dl), the amount of changes are very large. Compare [options](#options) and [supported sites](supportedsites.md) with youtube-dl's to get an idea of the massive number of features/patches [youtube-dlc](https://github.com/blackjack4494/yt-dlc) has accumulated.

### Differences in default behavior

Some of yt-dlp's default options are different from that of youtube-dl and youtube-dlc:

* The options `--auto-number` (`-A`), `--title` (`-t`) and `--literal` (`-l`), no longer work. See [removed options](#Removed) for details
* `avconv` is not supported as as an alternative to `ffmpeg`
* `avconv` is not supported as an alternative to `ffmpeg`
* The default [output template](#output-template) is `%(title)s [%(id)s].%(ext)s`. There is no real reason for this change. This was changed before yt-dlp was ever made public and now there are no plans to change it back to `%(title)s-%(id)s.%(ext)s`. Instead, you may use `--compat-options filename`
* The default [format sorting](#sorting-formats) is different from youtube-dl and prefers higher resolution and better codecs rather than higher bitrates. You can use the `--format-sort` option to change this to any order you prefer, or use `--compat-options format-sort` to use youtube-dl's sorting order
* The default format selector is `bv*+ba/b`. This means that if a combined video + audio format that is better than the best video-only format is found, the former will be prefered. Use `-f bv+ba/b` or `--compat-options format-spec` to revert this
* The default format selector is `bv*+ba/b`. This means that if a combined video + audio format that is better than the best video-only format is found, the former will be preferred. Use `-f bv+ba/b` or `--compat-options format-spec` to revert this
* Unlike youtube-dlc, yt-dlp does not allow merging multiple audio/video streams into one file by default (since this conflicts with the use of `-f bv*+ba`). If needed, this feature must be enabled using `--audio-multistreams` and `--video-multistreams`. You can also use `--compat-options multistreams` to enable both
* `--ignore-errors` is enabled by default. Use `--abort-on-error` or `--compat-options abort-on-error` to abort on errors instead
* When writing metadata files such as thumbnails, description or infojson, the same information (if available) is also written for playlists. Use `--no-write-playlist-metafiles` or `--compat-options no-playlist-metafiles` to not write these files
* `--add-metadata` attaches the `infojson` to `mkv` files in addition to writing the metadata when used with `--write-infojson`. Use `--compat-options no-attach-info-json` to revert this
* `--add-metadata` attaches the `infojson` to `mkv` files in addition to writing the metadata when used with `--write-info-json`. Use `--no-embed-info-json` or `--compat-options no-attach-info-json` to revert this
* Some metadata are embedded into different fields when using `--add-metadata` as compared to youtube-dl. Most notably, `comment` field contains the `webpage_url` and `synopsis` contains the `description`. You can [use `--parse-metadata`](https://github.com/yt-dlp/yt-dlp#modifying-metadata) to modify this to your liking or use `--compat-options embed-metadata` to revert this
* `playlist_index` behaves differently when used with options like `--playlist-reverse` and `--playlist-items`. See [#302](https://github.com/yt-dlp/yt-dlp/issues/302) for details. You can use `--compat-options playlist-index` if you want to keep the earlier behavior
* The output of `-F` is listed in a new format. Use `--compat-options list-formats` to revert this
* All *experiences* of a funimation episode are considered as a single video. This behavior breaks existing archives. Use `--compat-options seperate-video-versions` to extract information from only the default player
@@ -146,7 +143,7 @@ ### Differences in default behavior
* If `ffmpeg` is used as the downloader, the downloading and merging of formats happen in a single step when possible. Use `--compat-options no-direct-merge` to revert this
* Thumbnail embedding in `mp4` is done with mutagen if possible. Use `--compat-options embed-thumbnail-atomicparsley` to force the use of AtomicParsley instead
* Some private fields such as filenames are removed by default from the infojson. Use `--no-clean-infojson` or `--compat-options no-clean-infojson` to revert this
* When `--embed-subs` and `--write-subs` are used together, the subtitles are written to disk and also embedded in the media file. You can use just `--embed-subs` to embed the subs and automatically delete the seperate file. See [#630 (comment)](https://github.com/yt-dlp/yt-dlp/issues/630#issuecomment-893659460) for more info. `--compat-options no-keep-subs` can be used to revert this
* When `--embed-subs` and `--write-subs` are used together, the subtitles are written to disk and also embedded in the media file. You can use just `--embed-subs` to embed the subs and automatically delete the separate file. See [#630 (comment)](https://github.com/yt-dlp/yt-dlp/issues/630#issuecomment-893659460) for more info. `--compat-options no-keep-subs` can be used to revert this

For ease of use, a few more compat options are available:
* `--compat-options all`: Use all compat options
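
For instance, to opt back into a couple of the youtube-dl behaviours listed above in one command (an illustrative selection of compat options, not an exhaustive one; the URL is a placeholder):

```bash
yt-dlp --compat-options format-sort,no-keep-subs "URL"
```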
@@ -175,7 +172,7 @@ ### Using the release binary
```

```
sudo aria2c https://github.com/yt-dlp/yt-dlp/releases/latest/download/yt-dlp -o /usr/local/bin/yt-dlp
sudo aria2c https://github.com/yt-dlp/yt-dlp/releases/latest/download/yt-dlp --dir /usr/local/bin -o yt-dlp
sudo chmod a+rx /usr/local/bin/yt-dlp
```

@@ -195,7 +192,7 @@ ### With [PIP](https://pypi.org/project/pip)

If you want to be on the cutting edge, you can also install the master branch with:
```
python3 -m pip3 install --force-reinstall https://github.com/yt-dlp/yt-dlp/archive/master.zip
python3 -m pip install --force-reinstall https://github.com/yt-dlp/yt-dlp/archive/master.zip
```

Note that on some systems, you may need to use `py` or `python` instead of `python3`
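
For example, on a Windows setup where only the `py` launcher is on the PATH (an assumption about the local install), the equivalent would be:

```
py -m pip install --force-reinstall https://github.com/yt-dlp/yt-dlp/archive/master.zip
```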
@@ -215,6 +212,7 @@ ## UPDATE

If you [installed using Homebrew](#with-homebrew), run `brew upgrade yt-dlp/taps/yt-dlp`

<!-- MANPAGE: BEGIN EXCLUDED SECTION -->
## RELEASE FILES

#### Recommended
@@ -241,6 +239,7 @@ #### Misc
[yt-dlp.tar.gz](https://github.com/yt-dlp/yt-dlp/releases/latest/download/yt-dlp.tar.gz)|Source tarball. Also contains manpages, completions, etc
[SHA2-512SUMS](https://github.com/yt-dlp/yt-dlp/releases/latest/download/SHA2-512SUMS)|GNU-style SHA512 sums
[SHA2-256SUMS](https://github.com/yt-dlp/yt-dlp/releases/latest/download/SHA2-256SUMS)|GNU-style SHA256 sums
<!-- MANPAGE: END EXCLUDED SECTION -->

## DEPENDENCIES
Python versions 3.6+ (CPython and PyPy) are supported. Other versions and implementations may or may not work correctly.
@@ -250,8 +249,9 @@ ## DEPENDENCIES
On windows, [Microsoft Visual C++ 2010 SP1 Redistributable Package (x86)](https://download.microsoft.com/download/1/6/5/165255E7-1014-4D0A-B094-B6A430A6BFFC/vcredist_x86.exe) is also necessary to run yt-dlp. You probably already have this, but if the executable throws an error due to missing `MSVCR100.dll` you need to install it manually.
-->

While all the other dependancies are optional, `ffmpeg` and `ffprobe` are highly recommended
* [**ffmpeg** and **ffprobe**](https://www.ffmpeg.org) - Required for [merging seperate video and audio files](#format-selection) as well as for various [post-processing](#post-processing-options) tasks. Licence [depends on the build](https://www.ffmpeg.org/legal.html)
While all the other dependencies are optional, `ffmpeg` and `ffprobe` are highly recommended

* [**ffmpeg** and **ffprobe**](https://www.ffmpeg.org) - Required for [merging separate video and audio files](#format-selection) as well as for various [post-processing](#post-processing-options) tasks. License [depends on the build](https://www.ffmpeg.org/legal.html)
* [**mutagen**](https://github.com/quodlibet/mutagen) - For embedding thumbnail in certain formats. Licensed under [GPLv2+](https://github.com/quodlibet/mutagen/blob/master/COPYING)
* [**pycryptodomex**](https://github.com/Legrandin/pycryptodome) - For decrypting AES-128 HLS streams and various other data. Licensed under [BSD2](https://github.com/Legrandin/pycryptodome/blob/master/LICENSE.rst)
* [**websockets**](https://github.com/aaugustin/websockets) - For downloading over websocket. Licensed under [BSD3](https://github.com/aaugustin/websockets/blob/main/LICENSE)
@@ -267,7 +267,7 @@ ## DEPENDENCIES

The Windows and MacOS standalone release binaries are already built with the python interpreter, mutagen, pycryptodomex and websockets included.

**Note**: There are some regressions in newer ffmpeg versions that causes various issues when used alongside yt-dlp. Since ffmpeg is such an important dependancy, we provide [custom builds](https://github.com/yt-dlp/FFmpeg-Builds/wiki/Latest#latest-autobuilds) with patches for these issues at [yt-dlp/FFmpeg-Builds](https://github.com/yt-dlp/FFmpeg-Builds). See [the readme](https://github.com/yt-dlp/FFmpeg-Builds#patches-applied) for details on the specifc issues solved by these builds
**Note**: There are some regressions in newer ffmpeg versions that causes various issues when used alongside yt-dlp. Since ffmpeg is such an important dependency, we provide [custom builds](https://github.com/yt-dlp/FFmpeg-Builds/wiki/Latest#latest-autobuilds) with patches for these issues at [yt-dlp/FFmpeg-Builds](https://github.com/yt-dlp/FFmpeg-Builds). See [the readme](https://github.com/yt-dlp/FFmpeg-Builds#patches-applied) for details on the specific issues solved by these builds

## COMPILE
@@ -287,15 +287,17 @@ ## COMPILE

**Note**: In either platform, `devscripts/update-version.py` can be used to automatically update the version number

You can also fork the project on github and push it to a release branch in your fork for the [build workflow](https://github.com/yt-dlp/yt-dlp/blob/master/.github/workflows/build.yml) to automatically make a release for you
You can also fork the project on github and run your fork's [build workflow](.github/workflows/build.yml) to automatically build a release

# USAGE AND OPTIONS

<!-- MANPAGE: BEGIN EXCLUDED SECTION -->
    yt-dlp [OPTIONS] [--] URL [URL...]

`Ctrl+F` is your friend :D
<!-- Auto generated -->
<!-- MANPAGE: END EXCLUDED SECTION -->

<!-- Auto generated -->
## General Options:
    -h, --help                       Print this help text and exit
    --version                        Print program version and exit
@@ -339,6 +341,14 @@ ## General Options:
    --flat-playlist                  Do not extract the videos of a playlist,
                                     only list them
    --no-flat-playlist               Extract the videos of a playlist
    --live-from-start                Download livestreams from the start.
                                     Currently only supported for YouTube
    --no-live-from-start             Download livestreams from the current time
                                     (default)
    --wait-for-video MIN[-MAX]       Wait for scheduled streams to become
                                     available. Pass the minimum number of
                                     seconds (or range) to wait between retries
    --no-wait-for-video              Do not wait for scheduled streams (default)
    --mark-watched                   Mark videos watched (even with --simulate).
                                     Currently only supported for YouTube
    --no-mark-watched                Do not mark videos watched (default)
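
To illustrate the two new general options above (the URLs are placeholders):

```bash
# Download a currently running YouTube livestream from its very beginning
yt-dlp --live-from-start "https://www.youtube.com/watch?v=LIVE_ID"

# Wait for a scheduled stream, retrying every 60-300 seconds until it is available
yt-dlp --wait-for-video 60-300 "https://www.youtube.com/watch?v=SCHEDULED_ID"
```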
@@ -388,7 +398,6 @@ ## Video Selection:
                                     specify range: "--playlist-items
                                     1-3,7,10-13", it will download the videos
                                     at index 1, 2, 3, 7, 10, 11, 12 and 13
    --max-downloads NUMBER           Abort after downloading NUMBER files
    --min-filesize SIZE              Do not download any videos smaller than
                                     SIZE (e.g. 50k or 44.6m)
    --max-filesize SIZE              Do not download any videos larger than SIZE
@@ -430,18 +439,23 @@ ## Video Selection:
    --download-archive FILE          Download only videos not listed in the
                                     archive file. Record the IDs of all
                                     downloaded videos in it
    --no-download-archive            Do not use archive file (default)
    --max-downloads NUMBER           Abort after downloading NUMBER files
    --break-on-existing              Stop the download process when encountering
                                     a file that is in the archive
    --break-on-reject                Stop the download process when encountering
                                     a file that has been filtered out
    --break-per-input                Make --break-on-existing and --break-on-
                                     reject act only on the current input URL
    --no-break-per-input             --break-on-existing and --break-on-reject
                                     terminates the entire download queue
    --skip-playlist-after-errors N   Number of allowed failures until the rest
                                     of the playlist is skipped
    --no-download-archive            Do not use archive file (default)

## Download Options:
    -N, --concurrent-fragments N     Number of fragments of a dash/hlsnative
                                     video that should be download concurrently
                                     (default is 1)
                                     video that should be downloaded
                                     concurrently (default is 1)
    -r, --limit-rate RATE            Maximum download rate in bytes per second
                                     (e.g. 50K or 4.2M)
    --throttled-rate RATE            Minimum download rate in bytes per second
@@ -449,6 +463,8 @@ ## Download Options:
                                     video data is re-extracted (e.g. 100K)
    -R, --retries RETRIES            Number of retries (default is 10), or
                                     "infinite"
    --file-access-retries RETRIES    Number of times to retry on file access
                                     error (default is 10), or "infinite"
    --fragment-retries RETRIES       Number of retries for a fragment (default
                                     is 10), or "infinite" (DASH, hlsnative and
                                     ISM)
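
A brief illustration of the new download options above (the values are arbitrary; the URL is a placeholder):

```bash
# Treat anything below 100K/s as throttling and re-extract the video data
yt-dlp --throttled-rate 100K "URL"

# Retry file access errors up to 20 times instead of the default 10
yt-dlp --file-access-retries 20 "URL"
```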
@@ -511,9 +527,9 @@ ## Download Options:
                                     (Alias: --external-downloader-args)

## Filesystem Options:
    -a, --batch-file FILE            File containing URLs to download ('-' for
    -a, --batch-file FILE            File containing URLs to download ("-" for
                                     stdin), one URL per line. Lines starting
                                     with '#', ';' or ']' are considered as
                                     with "#", ";" or "]" are considered as
                                     comments and ignored
    --no-batch-file                  Do not read URLs from batch file (default)
    -P, --paths [TYPES:]PATH         The paths where the files should be
@@ -587,8 +603,8 @@ ## Filesystem Options:
    --load-info-json FILE            JSON file containing the video information
                                     (created with the "--write-info-json"
                                     option)
    --cookies FILE                   File to read cookies from and dump cookie
                                     jar in
    --cookies FILE                   Netscape formatted file to read cookies
                                     from and dump cookie jar in
    --no-cookies                     Do not read/dump cookies from/to file
                                     (default)
    --cookies-from-browser BROWSER[:PROFILE]
@@ -792,7 +808,7 @@ ## Post-Processing Options:
    --audio-format FORMAT            Specify audio format to convert the audio
                                     to when -x is used. Currently supported
                                     formats are: best (default) or one of
                                     best|aac|flac|mp3|m4a|opus|vorbis|wav
                                     best|aac|flac|mp3|m4a|opus|vorbis|wav|alac
    --audio-quality QUALITY          Specify ffmpeg audio quality, insert a
                                     value between 0 (best) and 10 (worst) for
                                     VBR or a specific bitrate like 128K
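
For example, to extract audio and convert it to the newly supported ALAC format (the URL is a placeholder):

```bash
yt-dlp -x --audio-format alac "URL"
```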
@@ -843,15 +859,20 @@ ## Post-Processing Options:
    --no-embed-subs                  Do not embed subtitles (default)
    --embed-thumbnail                Embed thumbnail in the video as cover art
    --no-embed-thumbnail             Do not embed thumbnail (default)
    --embed-metadata                 Embed metadata to the video file. Also adds
                                     chapters to file unless --no-add-chapters
                                     is used (Alias: --add-metadata)
    --embed-metadata                 Embed metadata to the video file. Also
                                     embeds chapters/infojson if present unless
                                     --no-embed-chapters/--no-embed-info-json
                                     are used (Alias: --add-metadata)
    --no-embed-metadata              Do not add metadata to file (default)
                                     (Alias: --no-add-metadata)
    --embed-chapters                 Add chapter markers to the video file
                                     (Alias: --add-chapters)
    --no-embed-chapters              Do not add chapter markers (default)
                                     (Alias: --no-add-chapters)
    --embed-info-json                Embed the infojson as an attachment to
                                     mkv/mka video files
    --no-embed-info-json             Do not embed the infojson as an attachment
                                     to the video file
    --parse-metadata FROM:TO         Parse additional metadata like title/artist
                                     from other fields; see "MODIFYING METADATA"
                                     for details
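
Putting the embedding options above together (a sketch; the URL is a placeholder):

```bash
# Embed metadata; as of this release this also embeds chapters and the infojson
# (for mkv/mka output) unless --no-embed-chapters / --no-embed-info-json are given
yt-dlp --embed-metadata "URL"

# Embed only chapter markers, without the rest of the metadata
yt-dlp --embed-chapters "URL"
```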
@@ -866,7 +887,7 @@ ## Post-Processing Options:
                                     emit a warning), detect_or_warn (the
                                     default; fix file if we can, warn
                                     otherwise), force (try fixing even if file
                                     already exists
                                     already exists)
    --ffmpeg-location PATH           Location of the ffmpeg binary; either the
                                     path to the binary or its containing
                                     directory
@@ -910,7 +931,7 @@ ## Post-Processing Options:
                                     (default)
    --force-keyframes-at-cuts        Force keyframes around the chapters before
                                     removing/splitting them. Requires a
                                     reencode and thus is very slow, but the
                                     re-encode and thus is very slow, but the
                                     resulting video may have fewer artifacts
                                     around the cuts
    --no-force-keyframes-at-cuts     Do not force keyframes around the chapters
@@ -918,7 +939,7 @@ ## Post-Processing Options:
    --use-postprocessor NAME[:ARGS]  The (case sensitive) name of plugin
                                     postprocessors to be enabled, and
                                     (optionally) arguments to be passed to it,
                                     seperated by a colon ":". ARGS are a
                                     separated by a colon ":". ARGS are a
                                     semicolon ";" delimited list of NAME=VALUE.
                                     The "when" argument determines when the
                                     postprocessor is invoked. It can be one of
@@ -937,19 +958,21 @@ ## SponsorBlock Options:

    --sponsorblock-mark CATS         SponsorBlock categories to create chapters
                                     for, separated by commas. Available
                                     categories are all, sponsor, intro, outro,
                                     selfpromo, interaction, preview,
                                     music_offtopic. You can prefix the category
                                     with a "-" to exempt it. See
                                     https://wiki.sponsor.ajay.app/index.php/Segment_Categories
                                     for description of the categories. Eg:
                                     --sponsorblock-query all,-preview
                                     categories are all, default(=all), sponsor,
                                     intro, outro, selfpromo, preview, filler,
                                     interaction, music_offtopic, poi_highlight.
                                     You can prefix the category with a "-" to
                                     exempt it. See [1] for description of the
                                     categories. Eg: --sponsorblock-mark all,-preview
                                     [1] https://wiki.sponsor.ajay.app/w/Segment_Categories
    --sponsorblock-remove CATS       SponsorBlock categories to be removed from
                                     the video file, separated by commas. If a
                                     category is present in both mark and
                                     remove, remove takes precedence. The syntax
                                     and available categories are the same as
                                     for --sponsorblock-mark
                                     for --sponsorblock-mark except that
                                     "default" refers to "all,-filler" and
                                     poi_highlight is not available
    --sponsorblock-chapter-title TEMPLATE
                                     The title template for SponsorBlock
                                     chapters created by --sponsorblock-mark.
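
A short example combining the SponsorBlock options above (a sketch; the URL is a placeholder):

```bash
# Mark all categories except previews as chapters, and cut out actual sponsor segments
yt-dlp --sponsorblock-mark all,-preview --sponsorblock-remove sponsor "URL"
```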
@@ -995,7 +1018,7 @@ # CONFIGURATION
    * `~/yt-dlp.conf`
    * `~/yt-dlp.conf.txt`

    `%XDG_CONFIG_HOME%` defaults to `~/.config` if undefined. On windows, `%APPDATA%` generally points to (`C:\Users\<user name>\AppData\Roaming`) and `~` points to `%HOME%` if present, `%USERPROFILE%` (generally `C:\Users\<user name>`), or `%HOMEDRIVE%%HOMEPATH%`
    `%XDG_CONFIG_HOME%` defaults to `~/.config` if undefined. On windows, `%APPDATA%` generally points to `C:\Users\<user name>\AppData\Roaming` and `~` points to `%HOME%` if present, `%USERPROFILE%` (generally `C:\Users\<user name>`), or `%HOMEDRIVE%%HOMEPATH%`
1. **System Configuration**: `/etc/yt-dlp.conf`

For example, with the following configuration file yt-dlp will always extract the audio, not copy the mtime, use a proxy and save all videos under `YouTube` directory in your home directory:
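
The configuration file itself falls outside this diff hunk; a sketch of what such a file could look like (the option names are real, but the proxy address and paths here are illustrative, not taken from the diff):

```
# Always extract audio
-x

# Do not copy the mtime
--no-mtime

# Use this proxy
--proxy 127.0.0.1:3128

# Save all videos under YouTube directory in your home directory
-o ~/YouTube/%(title)s.%(ext)s
```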
@@ -1043,7 +1066,9 @@ # OUTPUT TEMPLATE

The `-o` option is used to indicate a template for the output file names while `-P` option is used to specify the path each type of file should be saved to.
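
For instance, combining the two (a sketch; that `-P` accepts a `thumbnail:` type prefix here is assumed from the option's `[TYPES:]PATH` syntax):

```bash
# Save videos under ~/Videos, thumbnails under ~/Videos/thumbs, both named by title
yt-dlp -P "~/Videos" -P "thumbnail:thumbs" -o "%(title)s.%(ext)s" "URL"
```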

<!-- MANPAGE: BEGIN EXCLUDED SECTION -->
**tl;dr:** [navigate me to examples](#output-template-examples).
<!-- MANPAGE: END EXCLUDED SECTION -->

The simplest usage of `-o` is not to set any template arguments when downloading a single file, like in `yt-dlp -o funny_video.flv "https://some/video"` (hard-coding file extension like this is _not_ recommended and could break some post-processing).

@@ -1051,19 +1076,27 @@ # OUTPUT TEMPLATE

The field names themselves (the part inside the parenthesis) can also have some special formatting:
1. **Object traversal**: The dictionaries and lists available in metadata can be traversed by using a `.` (dot) separator. You can also do python slicing using `:`. Eg: `%(tags.0)s`, `%(subtitles.en.-1.ext)s`, `%(id.3:7:-1)s`, `%(formats.:.format_id)s`. `%()s` refers to the entire infodict. Note that all the fields that become available using this method are not listed below. Use `-j` to see such fields

1. **Addition**: Addition and subtraction of numeric fields can be done using `+` and `-` respectively. Eg: `%(playlist_index+10)03d`, `%(n_entries+1-playlist_index)d`

1. **Date/time Formatting**: Date/time fields can be formatted according to [strftime formatting](https://docs.python.org/3/library/datetime.html#strftime-and-strptime-format-codes) by specifying it separated from the field name using a `>`. Eg: `%(duration>%H-%M-%S)s`, `%(upload_date>%Y-%m-%d)s`, `%(epoch-3600>%H-%M-%S)s`
1. **Alternatives**: Alternate fields can be specified seperated with a `,`. Eg: `%(release_date>%Y,upload_date>%Y|Unknown)s`
1. **Default**: A literal default value can be specified for when the field is empty using a `|` seperator. This overrides `--output-na-template`. Eg: `%(uploader|Unknown)s`
1. **More Conversions**: In addition to the normal format types `diouxXeEfFgGcrs`, `B`, `j`, `l`, `q` can be used for converting to **B**ytes, **j**son (flag `#` for pretty-printing), a comma seperated **l**ist (flag `#` for `\n` newline-seperated) and a string **q**uoted for the terminal (flag `#` to split a list into different arguments), respectively

1. **Alternatives**: Alternate fields can be specified separated with a `,`. Eg: `%(release_date>%Y,upload_date>%Y|Unknown)s`

1. **Replacement**: A replacement value can specified using a `&` separator. If the field is *not* empty, this replacement value will be used instead of the actual field content. This is done after alternate fields are considered; thus the replacement is used if *any* of the alternative fields is *not* empty.

1. **Default**: A literal default value can be specified for when the field is empty using a `|` separator. This overrides `--output-na-template`. Eg: `%(uploader|Unknown)s`

1. **More Conversions**: In addition to the normal format types `diouxXeEfFgGcrs`, `B`, `j`, `l`, `q`, `D`, `S` can be used for converting to **B**ytes, **j**son (flag `#` for pretty-printing), a comma separated **l**ist (flag `#` for `\n` newline-separated), a string **q**uoted for the terminal (flag `#` to split a list into different arguments), to add **D**ecimal suffixes (Eg: 10M), and to **S**anitize as filename (flag `#` for restricted), respectively

1. **Unicode normalization**: The format type `U` can be used for NFC [unicode normalization](https://docs.python.org/3/library/unicodedata.html#unicodedata.normalize). The alternate form flag (`#`) changes the normalization to NFD and the conversion flag `+` can be used for NFKC/NFKD compatibility equivalence normalization. Eg: `%(title)+.100U` is NFKC

To summarize, the general syntax for a field is:
```
%(name[.keys][addition][>strf][,alternate][|default])[flags][width][.precision][length]type
%(name[.keys][addition][>strf][,alternate][&replacement][|default])[flags][width][.precision][length]type
```
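
A couple of hedged examples of this field syntax (the URLs are placeholders):

```bash
# Traverse into the first tag, fall back to the first category, default to "misc"
yt-dlp -o "%(tags.0,categories.0|misc)s/%(title)s.%(ext)s" "URL"

# "&" replacement: emit " - " after the playlist index only when an index exists
yt-dlp -o "%(playlist_index|)s%(playlist_index& - |)s%(title)s.%(ext)s" "URL"
```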

Additionally, you can set different output templates for the various metadata files separately from the general output template by specifying the type of file followed by the template separated by a colon `:`. The different file types supported are `subtitle`, `thumbnail`, `description`, `annotation` (deprecated), `infojson`, `link`, `pl_thumbnail`, `pl_description`, `pl_infojson`, `chapter`. For example, `-o '%(title)s.%(ext)s' -o 'thumbnail:%(title)s\%(title)s.%(ext)s'` will put the thumbnails in a folder with the same name as the video. If any of the templates (except default) is empty, that type of file will not be written. Eg: `--write-thumbnail -o "thumbnail:"` will write thumbnails only for playlists and not for video.
Additionally, you can set different output templates for the various metadata files separately from the general output template by specifying the type of file followed by the template separated by a colon `:`. The different file types supported are `subtitle`, `thumbnail`, `description`, `annotation` (deprecated), `infojson`, `link`, `pl_thumbnail`, `pl_description`, `pl_infojson`, `chapter`. For example, `-o "%(title)s.%(ext)s" -o "thumbnail:%(title)s\%(title)s.%(ext)s"` will put the thumbnails in a folder with the same name as the video. If any of the templates (except default) is empty, that type of file will not be written. Eg: `--write-thumbnail -o "thumbnail:"` will write thumbnails only for playlists and not for video.

The available fields are:

@@ -1094,11 +1127,11 @@ # OUTPUT TEMPLATE
 - `average_rating` (numeric): Average rating give by users, the scale used depends on the webpage
 - `comment_count` (numeric): Number of comments on the video (For some extractors, comments are only downloaded at the end, and so this field cannot be used)
 - `age_limit` (numeric): Age restriction for the video (years)
 - `live_status` (string): One of 'is_live', 'was_live', 'is_upcoming', 'not_live'
 - `live_status` (string): One of "is_live", "was_live", "is_upcoming", "not_live"
 - `is_live` (boolean): Whether this video is a live stream or a fixed-length video
 - `was_live` (boolean): Whether this video was originally a live stream
 - `playable_in_embed` (string): Whether this video is allowed to play in embedded players on other sites
 - `availability` (string): Whether the video is 'private', 'premium_only', 'subscriber_only', 'needs_auth', 'unlisted' or 'public'
 - `availability` (string): Whether the video is "private", "premium_only", "subscriber_only", "needs_auth", "unlisted" or "public"
 - `start_time` (numeric): Time in seconds where the reproduction should start, as specified in the URL
 - `end_time` (numeric): Time in seconds where the reproduction should end, as specified in the URL
 - `format` (string): A human-readable description of the format
@@ -1132,6 +1165,8 @@ # OUTPUT TEMPLATE
 - `playlist_uploader` (string): Full name of the playlist uploader
 - `playlist_uploader_id` (string): Nickname or id of the playlist uploader
 - `webpage_url` (string): A URL to the video webpage which if given to yt-dlp should allow to get the same result again
 - `webpage_url_basename` (string): The basename of the webpage URL
 - `webpage_url_domain` (string): The domain of the webpage URL
 - `original_url` (string): The URL given by the user (or same as `webpage_url` for playlist entries)

Available for the video that belongs to some logical chapter or section:
@@ -1192,43 +1227,49 @@ # OUTPUT TEMPLATE

For numeric sequences you can use [numeric related formatting](https://docs.python.org/3/library/stdtypes.html#printf-style-string-formatting), for example, `%(view_count)05d` will result in a string with view count padded with zeros up to 5 characters, like in `00042`.

Output templates can also contain arbitrary hierarchical path, e.g. `-o '%(playlist)s/%(playlist_index)s - %(title)s.%(ext)s'` which will result in downloading each video in a directory corresponding to this path template. Any missing directory will be automatically created for you.
Output templates can also contain arbitrary hierarchical path, e.g. `-o "%(playlist)s/%(playlist_index)s - %(title)s.%(ext)s"` which will result in downloading each video in a directory corresponding to this path template. Any missing directory will be automatically created for you.

To use percent literals in an output template use `%%`. To output to stdout use `-o -`.

The current default template is `%(title)s [%(id)s].%(ext)s`.

In some cases, you don't want special characters such as 中, spaces, or &, such as when transferring the downloaded filename to a Windows system or the filename through an 8bit-unsafe channel. In these cases, add the `--restrict-filenames` flag to get a shorter title:
In some cases, you don't want special characters such as 中, spaces, or &, such as when transferring the downloaded filename to a Windows system or the filename through an 8bit-unsafe channel. In these cases, add the `--restrict-filenames` flag to get a shorter title.

<!-- MANPAGE: BEGIN EXCLUDED SECTION -->
#### Output template and Windows batch files

If you are using an output template inside a Windows batch file then you must escape plain percent characters (`%`) by doubling, so that `-o "%(title)s-%(id)s.%(ext)s"` should become `-o "%%(title)s-%%(id)s.%%(ext)s"`. However you should not touch `%`'s that are not plain characters, e.g. environment variables for expansion should stay intact: `-o "C:\%HOMEPATH%\Desktop\%%(title)s.%%(ext)s"`.
<!-- MANPAGE: END EXCLUDED SECTION -->

#### Output template examples

Note that on Windows you need to use double quotes instead of single.

```bash
$ yt-dlp --get-filename -o '%(title)s.%(ext)s' BaW_jenozKc
youtube-dl test video ''_ä↭𝕐.mp4 # All kinds of weird characters
$ yt-dlp --get-filename -o "test video.%(ext)s" BaW_jenozKc
test video.webm # Literal name with correct extension

$ yt-dlp --get-filename -o '%(title)s.%(ext)s' BaW_jenozKc --restrict-filenames
youtube-dl_test_video_.mp4 # A simple file name
$ yt-dlp --get-filename -o "%(title)s.%(ext)s" BaW_jenozKc
youtube-dl test video ''_ä↭𝕐.webm # All kinds of weird characters

$ yt-dlp --get-filename -o "%(title)s.%(ext)s" BaW_jenozKc --restrict-filenames
youtube-dl_test_video_.webm # Restricted file name

# Download YouTube playlist videos in separate directory indexed by video order in a playlist
$ yt-dlp -o '%(playlist)s/%(playlist_index)s - %(title)s.%(ext)s' https://www.youtube.com/playlist?list=PLwiyx1dc3P2JR9N8gQaQN_BCvlSlap7re
$ yt-dlp -o "%(playlist)s/%(playlist_index)s - %(title)s.%(ext)s" "https://www.youtube.com/playlist?list=PLwiyx1dc3P2JR9N8gQaQN_BCvlSlap7re"

# Download YouTube playlist videos in separate directories according to their uploaded year
$ yt-dlp -o '%(upload_date>%Y)s/%(title)s.%(ext)s' https://www.youtube.com/playlist?list=PLwiyx1dc3P2JR9N8gQaQN_BCvlSlap7re
$ yt-dlp -o "%(upload_date>%Y)s/%(title)s.%(ext)s" "https://www.youtube.com/playlist?list=PLwiyx1dc3P2JR9N8gQaQN_BCvlSlap7re"

# Prefix playlist index with " - " separator, but only if it is available
$ yt-dlp -o '%(playlist_index|)s%(playlist_index& - |)s%(title)s.%(ext)s' BaW_jenozKc https://www.youtube.com/user/TheLinuxFoundation/playlists

# Download all playlists of YouTube channel/user keeping each playlist in separate directory:
$ yt-dlp -o '%(uploader)s/%(playlist)s/%(playlist_index)s - %(title)s.%(ext)s' https://www.youtube.com/user/TheLinuxFoundation/playlists
$ yt-dlp -o "%(uploader)s/%(playlist)s/%(playlist_index)s - %(title)s.%(ext)s" "https://www.youtube.com/user/TheLinuxFoundation/playlists"

# Download Udemy course keeping each chapter in separate directory under MyVideos directory in your home
$ yt-dlp -u user -p password -P '~/MyVideos' -o '%(playlist)s/%(chapter_number)s - %(chapter)s/%(title)s.%(ext)s' https://www.udemy.com/java-tutorial/
$ yt-dlp -u user -p password -P "~/MyVideos" -o "%(playlist)s/%(chapter_number)s - %(chapter)s/%(title)s.%(ext)s" "https://www.udemy.com/java-tutorial"

# Download entire series season keeping each series and each season in separate directory under C:/MyVideos
$ yt-dlp -P "C:/MyVideos" -o "%(series)s/%(season_number)s - %(season)s/%(episode_number)s - %(episode)s.%(ext)s" https://videomore.ru/kino_v_detalayah/5_sezon/367617
$ yt-dlp -P "C:/MyVideos" -o "%(series)s/%(season_number)s - %(season)s/%(episode_number)s - %(episode)s.%(ext)s" "https://videomore.ru/kino_v_detalayah/5_sezon/367617"

# Stream the video being downloaded to stdout
$ yt-dlp -o - BaW_jenozKc
```

@@ -1243,12 +1284,16 @@ # FORMAT SELECTION

The general syntax for format selection is `-f FORMAT` (or `--format FORMAT`) where `FORMAT` is a *selector expression*, i.e. an expression that describes format or formats you would like to download.

<!-- MANPAGE: BEGIN EXCLUDED SECTION -->
**tl;dr:** [navigate me to examples](#format-selection-examples).
<!-- MANPAGE: END EXCLUDED SECTION -->

The simplest case is requesting a specific format, for example with `-f 22` you can download the format with format code equal to 22. You can get the list of available format codes for particular video using `--list-formats` or `-F`. Note that these format codes are extractor specific.
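
A minimal round-trip using these options (the URL is a placeholder; format code 22 may not exist for every video):

```bash
# List all available formats first
yt-dlp -F "URL"

# ...then download one of them by its format code
yt-dlp -f 22 "URL"
```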

You can also use a file extension (currently `3gp`, `aac`, `flv`, `m4a`, `mp3`, `mp4`, `ogg`, `wav`, `webm` are supported) to download the best quality format of a particular file extension served as a single file, e.g. `-f webm` will download the best quality format with the `webm` extension served as a single file.

You can use `-f -` to interactively provide the format selector *for each video*

You can also use special names to select particular edge case formats:

- `all`: Select **all formats** separately
@@ -1311,7 +1356,7 @@ ## Filtering Formats

Formats for which the value is not known are excluded unless you put a question mark (`?`) after the operator. You can combine format filters, so `-f "[height<=?720][tbr>500]"` selects up to 720p videos (or videos where the height is not known) with a bitrate of at least 500 KBit/s. You can also use the filters with `all` to download all formats that satisfy the filter. For example, `-f "all[vcodec=none]"` selects all audio-only formats.

Format selectors can also be grouped using parentheses, for example if you want to download the best mp4 and webm formats with a height lower than 480 you can use `-f '(mp4,webm)[height<480]'`.
Format selectors can also be grouped using parentheses, for example if you want to download the best pre-merged mp4 and webm formats with a height lower than 480 you can use `-f "(mp4,webm)[height<480]"`.
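
Combining grouping with the `?` operator described above (a sketch; the URL is a placeholder):

```bash
# Best pre-merged mp4 or webm no taller than 480p, also allowing unknown heights
yt-dlp -f "(mp4,webm)[height<=?480]" "URL"
```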

## Sorting Formats

@@ -1325,12 +1370,12 @@ ## Sorting Formats
 - `lang`: Language preference as given by the extractor
 - `quality`: The quality of the format as given by the extractor
 - `source`: Preference of the source as given by the extractor
 - `proto`: Protocol used for download (`https`/`ftps` > `http`/`ftp` > `m3u8_native`/`m3u8` > `http_dash_segments`> `websocket_frag` > other > `mms`/`rtsp` > unknown > `f4f`/`f4m`)
 - `vcodec`: Video Codec (`av01` > `vp9.2` > `vp9` > `h265` > `h264` > `vp8` > `h263` > `theora` > other > unknown)
 - `acodec`: Audio Codec (`opus` > `vorbis` > `aac` > `mp4a` > `mp3` > `eac3` > `ac3` > `dts` > other > unknown)
 - `proto`: Protocol used for download (`https`/`ftps` > `http`/`ftp` > `m3u8_native`/`m3u8` > `http_dash_segments`> `websocket_frag` > `mms`/`rtsp` > `f4f`/`f4m`)
 - `vcodec`: Video Codec (`av01` > `vp9.2` > `vp9` > `h265` > `h264` > `vp8` > `h263` > `theora` > other)
 - `acodec`: Audio Codec (`flac`/`alac` > `wav`/`aiff` > `opus` > `vorbis` > `aac` > `mp4a` > `mp3` > `eac3` > `ac3` > `dts` > other)
 - `codec`: Equivalent to `vcodec,acodec`
 - `vext`: Video Extension (`mp4` > `webm` > `flv` > other > unknown). If `--prefer-free-formats` is used, `webm` is prefered.
 - `aext`: Audio Extension (`m4a` > `aac` > `mp3` > `ogg` > `opus` > `webm` > other > unknown). If `--prefer-free-formats` is used, the order changes to `opus` > `ogg` > `webm` > `m4a` > `mp3` > `aac`.
 - `vext`: Video Extension (`mp4` > `webm` > `flv` > other). If `--prefer-free-formats` is used, `webm` is preferred.
 - `aext`: Audio Extension (`m4a` > `aac` > `mp3` > `ogg` > `opus` > `webm` > other). If `--prefer-free-formats` is used, the order changes to `opus` > `ogg` > `webm` > `m4a` > `mp3` > `aac`.
 - `ext`: Equivalent to `vext,aext`
 - `filesize`: Exact filesize, if known in advance
 - `fs_approx`: Approximate filesize calculated from the manifests
@@ -1352,7 +1397,7 @@ ## Sorting Formats

The fields `hasvid` and `ie_pref` are always given highest priority in sorting, irrespective of the user-defined order. This behaviour can be changed by using `--format-sort-force`. Apart from these, the default order used is: `lang,quality,res,fps,hdr:12,codec:vp9.2,size,br,asr,proto,ext,hasaud,source,id`. The extractors may override this default order, but they cannot override the user-provided order.

Note that the default has `codec:vp9.2`; i.e. `av1` is not prefered. Similarly, the default for hdr is `hdr:12`; i.e. dolby vision is not prefered. These choices are made since DV and AV1 formats are not yet fully compatible with most devices. This may be changed in the future as more devices become capable of smoothly playing back these formats.
Note that the default has `codec:vp9.2`; i.e. `av1` is not preferred. Similarly, the default for hdr is `hdr:12`; i.e. dolby vision is not preferred. These choices are made since DV and AV1 formats are not yet fully compatible with most devices. This may be changed in the future as more devices become capable of smoothly playing back these formats.

If your format selector is `worst`, the last item is selected after sorting. This means it will select the format that is worst in all respects. Most of the time, what you actually want is the video with the smallest filesize instead. So it is generally better to use `-f best -S +size,+br,+res,+fps`.
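
That recommendation as a complete command (the URL is a placeholder):

```bash
# Prefer the smallest "best" format by size, then bitrate, resolution and framerate
yt-dlp -f best -S "+size,+br,+res,+fps" "URL"
```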
|
||||
|
||||
@@ -1360,16 +1405,14 @@ ## Sorting Formats
|
||||
|
||||
## Format Selection examples
|
||||
|
||||
Note that on Windows you may need to use double quotes instead of single.
|
||||
|
||||
```bash
|
||||
# Download and merge the best video-only format and the best audio-only format,
|
||||
# or download the best combined format if video-only format is not available
|
||||
$ yt-dlp -f 'bv+ba/b'
|
||||
$ yt-dlp -f "bv+ba/b"
|
||||
|
||||
# Download best format that contains video,
|
||||
# and if it doesn't already have an audio stream, merge it with best audio-only format
|
||||
$ yt-dlp -f 'bv*+ba/b'
|
||||
$ yt-dlp -f "bv*+ba/b"
|
||||
|
||||
# Same as above
|
||||
$ yt-dlp
|
||||
@@ -1377,89 +1420,89 @@ # Same as above
|
||||
# Download the best video-only format and the best audio-only format without merging them
# For this case, an output template should be used since
# by default, bestvideo and bestaudio will have the same file name.
-$ yt-dlp -f 'bv,ba' -o '%(title)s.f%(format_id)s.%(ext)s'
+$ yt-dlp -f "bv,ba" -o "%(title)s.f%(format_id)s.%(ext)s"

# Download and merge the best format that has a video stream,
# and all audio-only formats into one file
-$ yt-dlp -f 'bv*+mergeall[vcodec=none]' --audio-multistreams
+$ yt-dlp -f "bv*+mergeall[vcodec=none]" --audio-multistreams

# Download and merge the best format that has a video stream,
# and the best 2 audio-only formats into one file
-$ yt-dlp -f 'bv*+ba+ba.2' --audio-multistreams
+$ yt-dlp -f "bv*+ba+ba.2" --audio-multistreams


# The following examples show the old method (without -S) of format selection
# and how to use -S to achieve a similar but (generally) better result

# Download the worst video available (old method)
-$ yt-dlp -f 'wv*+wa/w'
+$ yt-dlp -f "wv*+wa/w"

# Download the best video available but with the smallest resolution
-$ yt-dlp -S '+res'
+$ yt-dlp -S "+res"

# Download the smallest video available
-$ yt-dlp -S '+size,+br'
+$ yt-dlp -S "+size,+br"


# Download the best mp4 video available, or the best video if no mp4 available
-$ yt-dlp -f 'bv*[ext=mp4]+ba[ext=m4a]/b[ext=mp4] / bv*+ba/b'
+$ yt-dlp -f "bv*[ext=mp4]+ba[ext=m4a]/b[ext=mp4] / bv*+ba/b"

# Download the best video with the best extension
# (For video, mp4 > webm > flv. For audio, m4a > aac > mp3 ...)
-$ yt-dlp -S 'ext'
+$ yt-dlp -S "ext"


# Download the best video available but no better than 480p,
# or the worst video if there is no video under 480p
-$ yt-dlp -f 'bv*[height<=480]+ba/b[height<=480] / wv*+ba/w'
+$ yt-dlp -f "bv*[height<=480]+ba/b[height<=480] / wv*+ba/w"

# Download the best video available with the largest height but no better than 480p,
# or the best video with the smallest resolution if there is no video under 480p
-$ yt-dlp -S 'height:480'
+$ yt-dlp -S "height:480"

# Download the best video available with the largest resolution but no better than 480p,
# or the best video with the smallest resolution if there is no video under 480p
# Resolution is determined by using the smallest dimension.
# So this works correctly for vertical videos as well
-$ yt-dlp -S 'res:480'
+$ yt-dlp -S "res:480"


# Download the best video (that also has audio) but no bigger than 50 MB,
# or the worst video (that also has audio) if there is no video under 50 MB
-$ yt-dlp -f 'b[filesize<50M] / w'
+$ yt-dlp -f "b[filesize<50M] / w"

# Download largest video (that also has audio) but no bigger than 50 MB,
# or the smallest video (that also has audio) if there is no video under 50 MB
-$ yt-dlp -f 'b' -S 'filesize:50M'
+$ yt-dlp -f "b" -S "filesize:50M"

# Download best video (that also has audio) that is closest in size to 50 MB
-$ yt-dlp -f 'b' -S 'filesize~50M'
+$ yt-dlp -f "b" -S "filesize~50M"


# Download best video available via direct link over HTTP/HTTPS protocol,
# or the best video available via any protocol if there is no such video
-$ yt-dlp -f '(bv*+ba/b)[protocol^=http][protocol!*=dash] / (bv*+ba/b)'
+$ yt-dlp -f "(bv*+ba/b)[protocol^=http][protocol!*=dash] / (bv*+ba/b)"

# Download best video available via the best protocol
# (https/ftps > http/ftp > m3u8_native > m3u8 > http_dash_segments ...)
-$ yt-dlp -S 'proto'
+$ yt-dlp -S "proto"


# Download the best video with h264 codec, or the best video if there is no such video
-$ yt-dlp -f '(bv*+ba/b)[vcodec^=avc1] / (bv*+ba/b)'
+$ yt-dlp -f "(bv*+ba/b)[vcodec^=avc1] / (bv*+ba/b)"

# Download the best video with best codec no better than h264,
# or the best video with worst codec if there is no such video
-$ yt-dlp -S 'codec:h264'
+$ yt-dlp -S "codec:h264"

# Download the best video with worst codec no worse than h264,
# or the best video with best codec if there is no such video
-$ yt-dlp -S '+codec:h264'
+$ yt-dlp -S "+codec:h264"


@@ -1467,19 +1510,19 @@ # More complex examples

# Download the best video no better than 720p preferring framerate greater than 30,
# or the worst video (still preferring framerate greater than 30) if there is no such video
-$ yt-dlp -f '((bv*[fps>30]/bv*)[height<=720]/(wv*[fps>30]/wv*)) + ba / (b[fps>30]/b)[height<=720]/(w[fps>30]/w)'
+$ yt-dlp -f "((bv*[fps>30]/bv*)[height<=720]/(wv*[fps>30]/wv*)) + ba / (b[fps>30]/b)[height<=720]/(w[fps>30]/w)"

# Download the video with the largest resolution no better than 720p,
# or the video with the smallest resolution available if there is no such video,
# preferring larger framerate for formats with the same resolution
-$ yt-dlp -S 'res:720,fps'
+$ yt-dlp -S "res:720,fps"


# Download the video with smallest resolution no worse than 480p,
# or the video with the largest resolution available if there is no such video,
# preferring better codec and then larger total bitrate for the same resolution
-$ yt-dlp -S '+res:480,codec,br'
+$ yt-dlp -S "+res:480,codec,br"
```
# MODIFYING METADATA

@@ -1520,26 +1563,24 @@ # MODIFYING METADATA

## Modifying metadata examples

Note that on Windows you may need to use double quotes instead of single.

```bash
# Interpret the title as "Artist - Title"
-$ yt-dlp --parse-metadata 'title:%(artist)s - %(title)s'
+$ yt-dlp --parse-metadata "title:%(artist)s - %(title)s"

# Regex example
-$ yt-dlp --parse-metadata 'description:Artist - (?P<artist>.+)'
+$ yt-dlp --parse-metadata "description:Artist - (?P<artist>.+)"

# Set title as "Series name S01E05"
-$ yt-dlp --parse-metadata '%(series)s S%(season_number)02dE%(episode_number)02d:%(title)s'
+$ yt-dlp --parse-metadata "%(series)s S%(season_number)02dE%(episode_number)02d:%(title)s"

# Set "comment" field in video metadata using description instead of webpage_url
-$ yt-dlp --parse-metadata 'description:(?s)(?P<meta_comment>.+)' --add-metadata
+$ yt-dlp --parse-metadata "description:(?s)(?P<meta_comment>.+)" --add-metadata

# Remove "formats" field from the infojson by setting it to an empty string
-$ yt-dlp --parse-metadata ':(?P<formats>)' -j
+$ yt-dlp --parse-metadata ":(?P<formats>)" -j

# Replace all spaces and "_" in title and uploader with a `-`
-$ yt-dlp --replace-in-metadata 'title,uploader' '[ _]' '-'
+$ yt-dlp --replace-in-metadata "title,uploader" "[ _]" "-"
```
@@ -1551,12 +1592,14 @@ # EXTRACTOR ARGUMENTS

#### youtube
* `skip`: `hls` or `dash` (or both) to skip download of the respective manifests
-* `player_client`: Clients to extract video data from. The main clients are `web`, `android`, `ios`, `mweb`. These also have `_music`, `_embedded`, `_agegate`, and `_creator` variants (Eg: `web_embedded`) (`mweb` has only `_agegate`). By default, `android,web` is used, but the agegate and creator variants are added as required for age-gated videos. Similarly the music variants are added for `music.youtube.com` urls. You can also use `all` to use all the clients
+* `player_client`: Clients to extract video data from. The main clients are `web`, `android`, `ios`, `mweb`. These also have `_music`, `_embedded`, `_agegate`, and `_creator` variants (Eg: `web_embedded`) (`mweb` has only `_agegate`). By default, `android,web` is used, but the agegate and creator variants are added as required for age-gated videos. Similarly the music variants are added for `music.youtube.com` urls. You can also use `all` to use all the clients, and `default` for the default clients.
* `player_skip`: Skip some network requests that are generally needed for robust extraction. One or more of `configs` (skip client configs), `webpage` (skip initial webpage), `js` (skip js player). While these options can help reduce the number of requests needed or avoid some rate-limiting, they could cause some issues. See [#860](https://github.com/yt-dlp/yt-dlp/pull/860) for more details
-* `include_live_dash`: Include live dash formats (These formats don't download properly)
+* `include_live_dash`: Include live dash formats even without `--live-from-start` (These formats don't download properly)
* `comment_sort`: `top` or `new` (default) - choose comment sorting mode (on YouTube's side)
-* `max_comments`: Maximum amount of comments to download (default all)
-* `max_comment_depth`: Maximum depth for nested comments. YouTube supports depths 1 or 2 (default)
+* `max_comments`: Limit the amount of comments to gather. Comma-separated list of integers representing `max-comments,max-parents,max-replies,max-replies-per-thread`. Default is `all,all,all,all`.
+    * E.g. `all,all,1000,10` will get a maximum of 1000 replies total, with up to 10 replies per thread. `1000,all,100` will get a maximum of 1000 comments, with a maximum of 100 replies total.
+* `max_comment_depth`: Maximum depth for nested comments. YouTube supports depths 1 or 2 (default)
+    * **Deprecated**: Set `max-replies` to `0` or `all` in `max_comments` instead (e.g. `max_comments=all,all,0` to get no replies)
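A usage sketch combining some of the keys above (hypothetical values; the `NAME:ARG=VAL1,VAL2;ARG2=VAL` syntax of `--extractor-args` is assumed from this README's EXTRACTOR ARGUMENTS section, and the URL is a placeholder):

```bash
# Extract using only the android and web clients, and skip downloading comment replies
$ yt-dlp --extractor-args "youtube:player_client=android,web;max_comments=all,all,0" "https://www.youtube.com/watch?v=XXXX"
```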
#### youtubetab (YouTube playlists, channels, feeds, etc.)
* `skip`: One or more of `webpage` (skip initial webpage download), `authcheck` (allow the download of playlists requiring authentication when no initial webpage is downloaded. This may cause unwanted behavior, see [#1122](https://github.com/yt-dlp/yt-dlp/pull/1122) for more details)

@@ -1572,8 +1615,16 @@ #### crunchyroll

#### vikichannel
* `video_types`: Types of videos to download - one or more of `episodes`, `movies`, `clips`, `trailers`

#### youtubewebarchive
* `check_all`: Try to check more at the cost of more requests. One or more of `thumbnails`, `captures`

#### gamejolt
* `comment_sort`: `hot` (default), `you` (cookies needed), `top`, `new` - choose comment sorting mode (on GameJolt's side)

NOTE: These options may be changed/removed in the future without concern for backward compatibility

<!-- MANPAGE: MOVE "INSTALLATION" SECTION HERE -->


# PLUGINS
@@ -1600,14 +1651,14 @@ # EMBEDDING YT-DLP
```python
from yt_dlp import YoutubeDL

-ydl_opts = {}
+ydl_opts = {'format': 'bestaudio'}
with YoutubeDL(ydl_opts) as ydl:
    ydl.download(['https://www.youtube.com/watch?v=BaW_jenozKc'])
```

-Most likely, you'll want to use various options. For a list of options available, have a look at [`yt_dlp/YoutubeDL.py`](yt_dlp/YoutubeDL.py#L154-L452).
+Most likely, you'll want to use various options. For a list of options available, have a look at [`yt_dlp/YoutubeDL.py`](yt_dlp/YoutubeDL.py#L162).

-Here's a more complete example of a program that outputs only errors (and a short message after the download is finished), converts the video to an mp3 file, implements a custom postprocessor and prints the final info_dict as json:
+Here's a more complete example demonstrating various functionality:
```python
import json

@@ -1616,7 +1667,7 @@ # EMBEDDING YT-DLP

class MyLogger:
    def debug(self, msg):
-        # For compatability with youtube-dl, both debug and info are passed into debug
+        # For compatibility with youtube-dl, both debug and info are passed into debug
        # You can distinguish them by the prefix '[debug] '
        if msg.startswith('[debug] '):
            pass

@@ -1633,23 +1684,56 @@ # EMBEDDING YT-DLP
        print(msg)


# ℹ️ See the docstring of yt_dlp.postprocessor.common.PostProcessor
class MyCustomPP(yt_dlp.postprocessor.PostProcessor):
    # ℹ️ See docstring of yt_dlp.postprocessor.common.PostProcessor.run
    def run(self, info):
        self.to_screen('Doing stuff')
        return [], info


# ℹ️ See "progress_hooks" in the docstring of yt_dlp.YoutubeDL
def my_hook(d):
    if d['status'] == 'finished':
        print('Done downloading, now converting ...')


def format_selector(ctx):
    """ Select the best video and the best audio that won't result in an mkv.
    This is just an example and does not handle all cases """

    # formats are already sorted worst to best
    formats = ctx.get('formats')[::-1]

    # acodec='none' means there is no audio
    best_video = next(f for f in formats
                      if f['vcodec'] != 'none' and f['acodec'] == 'none')

    # find compatible audio extension
    audio_ext = {'mp4': 'm4a', 'webm': 'webm'}[best_video['ext']]
    # vcodec='none' means there is no video
    best_audio = next(f for f in formats if (
        f['acodec'] != 'none' and f['vcodec'] == 'none' and f['ext'] == audio_ext))

    yield {
        # These are the minimum required fields for a merged format
        'format_id': f'{best_video["format_id"]}+{best_audio["format_id"]}',
        'ext': best_video['ext'],
        'requested_formats': [best_video, best_audio],
        # Must be + separated list of protocols
        'protocol': f'{best_video["protocol"]}+{best_audio["protocol"]}'
    }


# ℹ️ See docstring of yt_dlp.YoutubeDL for a description of the options
ydl_opts = {
-    'format': 'bestaudio/best',
+    'format': format_selector,
    'postprocessors': [{
-        'key': 'FFmpegExtractAudio',
-        'preferredcodec': 'mp3',
-        'preferredquality': '192',
+        # Embed metadata in video using ffmpeg.
+        # ℹ️ See yt_dlp.postprocessor.FFmpegMetadataPP for the arguments it accepts
+        'key': 'FFmpegMetadata',
+        'add_chapters': True,
+        'add_metadata': True,
    }],
    'logger': MyLogger(),
    'progress_hooks': [my_hook],

@@ -1659,17 +1743,21 @@ # EMBEDDING YT-DLP
# Add custom headers
yt_dlp.utils.std_headers.update({'Referer': 'https://www.google.com'})

-# ℹ️ See the public functions in yt_dlp.YoutubeDL for for other available functions.
-# Eg: "ydl.download", "ydl.download_with_info_file"
with yt_dlp.YoutubeDL(ydl_opts) as ydl:
    ydl.add_post_processor(MyCustomPP())
    info = ydl.extract_info('https://www.youtube.com/watch?v=BaW_jenozKc')

    # ℹ️ ydl.sanitize_info makes the info json-serializable
    print(json.dumps(ydl.sanitize_info(info)))
```

See the public functions in [`yt_dlp/YoutubeDL.py`](yt_dlp/YoutubeDL.py) for other available functions. Eg: `ydl.download`, `ydl.download_with_info_file`

**Tip**: If you are porting your code from youtube-dl to yt-dlp, one important point to look out for is that we do not guarantee the return value of `YoutubeDL.extract_info` to be json serializable, or even be a dictionary. It will be dictionary-like, but if you want to ensure it is a serializable dictionary, pass it through `YoutubeDL.sanitize_info` as shown in the example above
<!-- MANPAGE: MOVE "NEW FEATURES" SECTION HERE -->

# DEPRECATED OPTIONS

These are all the deprecated options and the current alternative to achieve the same effect

@@ -1741,7 +1829,7 @@ #### Old aliases
    --yes-overwrites                 --force-overwrites

#### Sponskrub Options
-Support for [SponSkrub](https://github.com/faissaloo/SponSkrub) has been deprecated in favor of `--sponsorblock`
+Support for [SponSkrub](https://github.com/faissaloo/SponSkrub) has been deprecated in favor of the `--sponsorblock` options

    --sponskrub                      --sponsorblock-mark all
    --no-sponskrub                   --no-sponsorblock
@@ -39,12 +39,6 @@ class {name}({bases}):
    _module = '{module}'
'''

-make_valid_template = '''
-    @classmethod
-    def _make_valid_url(cls):
-        return {valid_url!r}
-'''


def get_base_name(base):
    if base is InfoExtractor:

@@ -61,15 +55,14 @@ def build_lazy_ie(ie, name):
        bases=', '.join(map(get_base_name, ie.__bases__)),
        module=ie.__module__)
+    valid_url = getattr(ie, '_VALID_URL', None)
+    if not valid_url and hasattr(ie, '_make_valid_url'):
+        valid_url = ie._make_valid_url()
+    if valid_url:
+        s += f'    _VALID_URL = {valid_url!r}\n'
    if not ie._WORKING:
        s += '    _WORKING = False\n'
    if ie.suitable.__func__ is not InfoExtractor.suitable.__func__:
        s += f'\n{getsource(ie.suitable)}'
-    if hasattr(ie, '_make_valid_url'):
-        # search extractors
-        s += make_valid_template.format(valid_url=ie._make_valid_url())
    return s
@@ -13,12 +13,14 @@

# NAME

-youtube\-dl \- download videos from youtube.com or other video platforms
+yt\-dlp \- A youtube-dl fork with additional features and patches

# SYNOPSIS

**yt-dlp** \[OPTIONS\] URL [URL...]

# DESCRIPTION

'''
@@ -33,47 +35,63 @@ def main():
    with io.open(README_FILE, encoding='utf-8') as f:
        readme = f.read()

-    readme = re.sub(r'(?s)^.*?(?=# DESCRIPTION)', '', readme)
-    readme = re.sub(r'\s+yt-dlp \[OPTIONS\] URL \[URL\.\.\.\]', '', readme)
-    readme = PREFIX + readme
+    readme = filter_excluded_sections(readme)
+    readme = move_sections(readme)
+    readme = filter_options(readme)

    with io.open(outfile, 'w', encoding='utf-8') as outf:
-        outf.write(readme)
+        outf.write(PREFIX + readme)


+def filter_excluded_sections(readme):
+    EXCLUDED_SECTION_BEGIN_STRING = re.escape('<!-- MANPAGE: BEGIN EXCLUDED SECTION -->')
+    EXCLUDED_SECTION_END_STRING = re.escape('<!-- MANPAGE: END EXCLUDED SECTION -->')
+    return re.sub(
+        rf'(?s){EXCLUDED_SECTION_BEGIN_STRING}.+?{EXCLUDED_SECTION_END_STRING}\n',
+        '', readme)


+def move_sections(readme):
+    MOVE_TAG_TEMPLATE = '<!-- MANPAGE: MOVE "%s" SECTION HERE -->'
+    sections = re.findall(r'(?m)^%s$' % (
+        re.escape(MOVE_TAG_TEMPLATE).replace(r'\%', '%') % '(.+)'), readme)
+
+    for section_name in sections:
+        move_tag = MOVE_TAG_TEMPLATE % section_name
+        if readme.count(move_tag) > 1:
+            raise Exception(f'There is more than one occurrence of "{move_tag}". This is unexpected')
+
+        sections = re.findall(rf'(?sm)(^# {re.escape(section_name)}.+?)(?=^# )', readme)
+        if len(sections) < 1:
+            raise Exception(f'The section {section_name} does not exist')
+        elif len(sections) > 1:
+            raise Exception(f'There are multiple occurrences of section {section_name}, this is unhandled')
+
+        readme = readme.replace(sections[0], '', 1).replace(move_tag, sections[0], 1)
+    return readme


def filter_options(readme):
-    ret = ''
-    in_options = False
-    for line in readme.split('\n'):
-        if line.startswith('# '):
-            if line[2:].startswith('OPTIONS'):
-                in_options = True
-            else:
-                in_options = False
+    section = re.search(r'(?sm)^# USAGE AND OPTIONS\n.+?(?=^# )', readme).group(0)
+    options = '# OPTIONS\n'
+    for line in section.split('\n')[1:]:
+        if line.lstrip().startswith('-'):
+            split = re.split(r'\s{2,}', line.lstrip())
+            # Description string may start with `-` as well. If there is
+            # only one piece then it's a description bit not an option.
+            if len(split) > 1:
+                option, description = split
+                split_option = option.split(' ')

-        if in_options:
-            if line.lstrip().startswith('-'):
-                split = re.split(r'\s{2,}', line.lstrip())
-                # Description string may start with `-` as well. If there is
-                # only one piece then it's a description bit not an option.
-                if len(split) > 1:
-                    option, description = split
-                    split_option = option.split(' ')
+                if not split_option[-1].startswith('-'):  # metavar
+                    option = ' '.join(split_option[:-1] + [f'*{split_option[-1]}*'])

-                    if not split_option[-1].startswith('-'):  # metavar
-                        option = ' '.join(split_option[:-1] + ['*%s*' % split_option[-1]])
+                # Pandoc's definition_lists. See http://pandoc.org/README.html
+                options += f'\n{option}\n: {description}\n'
+                continue
+        options += line.lstrip() + '\n'

-                    # Pandoc's definition_lists. See http://pandoc.org/README.html
-                    # for more information.
-                    ret += '\n%s\n: %s\n' % (option, description)
-                    continue
-            ret += line.lstrip() + '\n'
-        else:
-            ret += line + '\n'

-    return ret
+    return readme.replace(section, options, 1)


if __name__ == '__main__':
@@ -1,33 +1,42 @@
#!/usr/bin/env python3
from __future__ import unicode_literals

from datetime import datetime
-# import urllib.request
+import sys
+import subprocess

-# response = urllib.request.urlopen('https://blackjack4494.github.io/youtube-dlc/update/LATEST_VERSION')
-# old_version = response.read().decode('utf-8')

-exec(compile(open('yt_dlp/version.py').read(), 'yt_dlp/version.py', 'exec'))
+with open('yt_dlp/version.py', 'rt') as f:
+    exec(compile(f.read(), 'yt_dlp/version.py', 'exec'))
old_version = locals()['__version__']

-old_version_list = old_version.split(".", 4)
+old_version_list = old_version.split('.')

old_ver = '.'.join(old_version_list[:3])
old_rev = old_version_list[3] if len(old_version_list) > 3 else ''

ver = datetime.utcnow().strftime("%Y.%m.%d")
-rev = str(int(old_rev or 0) + 1) if old_ver == ver else ''

+rev = (sys.argv[1:] or [''])[0]  # Use first argument, if present as revision number
+if not rev:
+    rev = str(int(old_rev or 0) + 1) if old_ver == ver else ''

VERSION = '.'.join((ver, rev)) if rev else ver
-# VERSION_LIST = [(int(v) for v in ver.split(".") + [rev or 0])]

+try:
+    sp = subprocess.Popen(['git', 'rev-parse', '--short', 'HEAD'], stdout=subprocess.PIPE)
+    GIT_HEAD = sp.communicate()[0].decode().strip() or None
+except Exception:
+    GIT_HEAD = None

+VERSION_FILE = f'''\
+# Autogenerated by devscripts/update-version.py

+__version__ = {VERSION!r}

+RELEASE_GIT_HEAD = {GIT_HEAD!r}
+'''

+with open('yt_dlp/version.py', 'wt') as f:
+    f.write(VERSION_FILE)

print('::set-output name=ytdlp_version::' + VERSION)

-file_version_py = open('yt_dlp/version.py', 'rt')
-data = file_version_py.read()
-data = data.replace(old_version, VERSION)
-file_version_py.close()

-file_version_py = open('yt_dlp/version.py', 'wt')
-file_version_py.write(data)
-file_version_py.close()
+print(f'\nVersion = {VERSION}, Git HEAD = {GIT_HEAD}')
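A usage sketch for the updated script (assuming it lives at devscripts/update-version.py, as in the yt-dlp repo; the optional argument is the revision number it now reads from sys.argv):

```bash
# Derive today's version, auto-incrementing the revision if run twice in a day
$ python devscripts/update-version.py
# Or force a specific revision number
$ python devscripts/update-version.py 2
```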
docs/Contributing.md (new file, 5 lines)
@@ -0,0 +1,5 @@
---
orphan: true
---
```{include} ../Contributing.md
```
@@ -40,7 +40,7 @@ def main():
        '--icon=devscripts/logo.ico',
        '--upx-exclude=vcruntime140.dll',
        '--noconfirm',
-        *dependancy_options(),
+        *dependency_options(),
        *opts,
        'yt_dlp/__main__.py',
    ]

@@ -73,11 +73,11 @@ def version_to_list(version):
    return list(map(int, version_list)) + [0] * (4 - len(version_list))


-def dependancy_options():
-    dependancies = [pycryptodome_module(), 'mutagen'] + collect_submodules('websockets')
+def dependency_options():
+    dependencies = [pycryptodome_module(), 'mutagen'] + collect_submodules('websockets')
    excluded_modules = ['test', 'ytdlp_plugins', 'youtube-dl', 'youtube-dlc']

-    yield from (f'--hidden-import={module}' for module in dependancies)
+    yield from (f'--hidden-import={module}' for module in dependencies)
    yield from (f'--exclude-module={module}' for module in excluded_modules)
@@ -21,6 +21,7 @@ # Supported sites
- **9now.com.au**
- **abc.net.au**
- **abc.net.au:iview**
- **abc.net.au:iview:showseries**
- **abcnews**
- **abcnews:video**
- **abcotvs**: ABC Owned Television Stations

@@ -141,6 +142,7 @@ # Supported sites
- **BlackboardCollaborate**
- **BleacherReport**
- **BleacherReportCMS**
- **blogger.com**
- **Bloomberg**
- **BokeCC**
- **BongaCams**

@@ -150,6 +152,7 @@ # Supported sites
- **BR**: Bayerischer Rundfunk
- **BravoTV**
- **Break**
- **BreitBart**
- **brightcove:legacy**
- **brightcove:new**
- **BRMediathek**: Bayerischer Rundfunk Mediathek

@@ -158,11 +161,13 @@ # Supported sites
- **BusinessInsider**
- **BuzzFeed**
- **BYUtv**
- **CableAV**
- **CAM4**
- **Camdemy**
- **CamdemyFolder**
- **CamModels**
- **CamWithHer**
- **CanalAlpha**
- **canalc2.tv**
- **Canalplus**: mycanal.fr and piwiplus.fr
- **Canvas**

@@ -222,6 +227,8 @@ # Supported sites
- **CONtv**
- **Corus**
- **Coub**
- **CozyTV**
- **cp24**
- **Cracked**
- **Crackle**
- **CrooksAndLiars**

@@ -236,7 +243,8 @@ # Supported sites
- **cu.ntv.co.jp**: Nippon Television Network
- **CultureUnplugged**
- **curiositystream**
-- **curiositystream:collection**
+- **curiositystream:collections**
- **curiositystream:series**
- **CWTV**
- **DagelijkseKost**: dagelijksekost.een.be
- **DailyMail**

@@ -266,6 +274,8 @@ # Supported sites
- **DiscoveryPlus**
- **DiscoveryPlusIndia**
- **DiscoveryPlusIndiaShow**
- **DiscoveryPlusItaly**
- **DiscoveryPlusItalyShow**
- **DiscoveryVR**
- **Disney**
- **DIYNetwork**

@@ -279,6 +289,8 @@ # Supported sites
- **DPlay**
- **DRBonanza**
- **Dropbox**
- **Dropout**
- **DropoutSeason**
- **DrTuber**
- **drtv**
- **drtv:live**

@@ -315,6 +327,7 @@ # Supported sites
- **Escapist**
- **ESPN**
- **ESPNArticle**
- **ESPNCricInfo**
- **EsriVideo**
- **Europa**
- **EUScreen**

@@ -370,6 +383,12 @@ # Supported sites
- **GabTV**
- **Gaia**
- **GameInformer**
- **GameJolt**
- **GameJoltCommunity**
- **GameJoltGame**
- **GameJoltGameSoundtrack**
- **GameJoltSearch**
- **GameJoltUser**
- **GameSpot**
- **GameStar**
- **Gaskrank**

@@ -390,6 +409,7 @@ # Supported sites
- **GloboArticle**
- **Go**
- **GodTube**
- **Gofile**
- **Golem**
- **google:podcasts**
- **google:podcasts:feed**

@@ -427,6 +447,8 @@ # Supported sites
- **hrfernsehen**
- **HRTi**
- **HRTiPlaylist**
- **HSEProduct**
- **HSEShow**
- **Huajiao**: 花椒直播
- **HuffPost**: Huffington Post
- **Hungama**

@@ -448,7 +470,7 @@ # Supported sites
- **IndavideoEmbed**
- **InfoQ**
- **Instagram**
-- **instagram:tag**: Instagram hashtag search
+- **instagram:tag**: Instagram hashtag search URLs
- **instagram:user**: Instagram user profile
- **InstagramIOS**: IOS instagram:// URL
- **Internazionale**

@@ -524,6 +546,7 @@ # Supported sites
- **LineLive**
- **LineLiveChannel**
- **LineTV**
- **LinkedIn**
- **linkedin:learning**
- **linkedin:learning:course**
- **LinuxAcademy**

@@ -591,6 +614,7 @@ # Supported sites
- **mirrativ**
- **mirrativ:user**
- **MiTele**: mitele.es
- **mixch**
- **mixcloud**
- **mixcloud:playlist**
- **mixcloud:user**

@@ -641,6 +665,8 @@ # Supported sites
- **n-tv.de**
- **N1Info:article**
- **N1InfoAsset**
- **Nate**
- **NateProgram**
- **natgeo:video**
- **NationalGeographicTV**
- **Naver**

@@ -663,6 +689,7 @@ # Supported sites
- **ndr:embed:base**
- **NDTV**
- **Nebula**
- **nebula:collection**
- **NerdCubedFeed**
- **netease:album**: 网易云音乐 - 专辑
- **netease:djradio**: 网易云音乐 - 电台

@@ -696,8 +723,8 @@ # Supported sites
- **niconico**: ニコニコ動画
- **NiconicoPlaylist**
- **NiconicoUser**
-- **nicovideo:search**: Nico video searches; "nicosearch:" prefix
-- **nicovideo:search:date**: Nico video searches, newest first; "nicosearchdate:" prefix
+- **nicovideo:search**: Nico video search; "nicosearch:" prefix
+- **nicovideo:search:date**: Nico video search, newest first; "nicosearchdate:" prefix
- **nicovideo:search_url**: Nico video search URLs
- **Nintendo**
- **Nitter**

@@ -746,6 +773,7 @@ # Supported sites
- **OlympicsReplay**
- **on24**: ON24
- **OnDemandKorea**
- **OneFootball**
- **onet.pl**
- **onet.tv**
- **onet.tv:channel**

@@ -753,6 +781,8 @@ # Supported sites
- **OnionStudios**
- **Ooyala**
- **OoyalaExternal**
- **Opencast**
- **OpencastPlaylist**
- **openrec**
- **openrec:capture**
- **OraTV**

@@ -788,6 +818,7 @@ # Supported sites
- **PatreonUser**
- **pbs**: Public Broadcasting Service (PBS) and member stations: PBS: Public Broadcasting Service, APT - Alabama Public Television (WBIQ), GPB/Georgia Public Broadcasting (WGTV), Mississippi Public Broadcasting (WMPN), Nashville Public Television (WNPT), WFSU-TV (WFSU), WSRE (WSRE), WTCI (WTCI), WPBA/Channel 30 (WPBA), Alaska Public Media (KAKM), Arizona PBS (KAET), KNME-TV/Channel 5 (KNME), Vegas PBS (KLVX), AETN/ARKANSAS ETV NETWORK (KETS), KET (WKLE), WKNO/Channel 10 (WKNO), LPB/LOUISIANA PUBLIC BROADCASTING (WLPB), OETA (KETA), Ozarks Public Television (KOZK), WSIU Public Broadcasting (WSIU), KEET TV (KEET), KIXE/Channel 9 (KIXE), KPBS San Diego (KPBS), KQED (KQED), KVIE Public Television (KVIE), PBS SoCal/KOCE (KOCE), ValleyPBS (KVPT), CONNECTICUT PUBLIC TELEVISION (WEDH), KNPB Channel 5 (KNPB), SOPTV (KSYS), Rocky Mountain PBS (KRMA), KENW-TV3 (KENW), KUED Channel 7 (KUED), Wyoming PBS (KCWC), Colorado Public Television / KBDI 12 (KBDI), KBYU-TV (KBYU), Thirteen/WNET New York (WNET), WGBH/Channel 2 (WGBH), WGBY (WGBY), NJTV Public Media NJ (WNJT), WLIW21 (WLIW), mpt/Maryland Public Television (WMPB), WETA Television and Radio (WETA), WHYY (WHYY), PBS 39 (WLVT), WVPT - Your Source for PBS and More! (WVPT), Howard University Television (WHUT), WEDU PBS (WEDU), WGCU Public Media (WGCU), WPBT2 (WPBT), WUCF TV (WUCF), WUFT/Channel 5 (WUFT), WXEL/Channel 42 (WXEL), WLRN/Channel 17 (WLRN), WUSF Public Broadcasting (WUSF), ETV (WRLK), UNC-TV (WUNC), PBS Hawaii - Oceanic Cable Channel 10 (KHET), Idaho Public Television (KAID), KSPS (KSPS), OPB (KOPB), KWSU/Channel 10 & KTNW/Channel 31 (KWSU), WILL-TV (WILL), Network Knowledge - WSEC/Springfield (WSEC), WTTW11 (WTTW), Iowa Public Television/IPTV (KDIN), Nine Network (KETC), PBS39 Fort Wayne (WFWA), WFYI Indianapolis (WFYI), Milwaukee Public Television (WMVS), WNIN (WNIN), WNIT Public Television (WNIT), WPT (WPNE), WVUT/Channel 22 (WVUT), WEIU/Channel 51 (WEIU), WQPT-TV (WQPT), WYCC PBS Chicago (WYCC), WIPB-TV (WIPB), WTIU (WTIU), CET (WCET), ThinkTVNetwork (WPTD), WBGU-TV (WBGU), WGVU TV (WGVU), NET1 (KUON), Pioneer Public Television (KWCM), SDPB Television (KUSD), TPT (KTCA), KSMQ (KSMQ), KPTS/Channel 8 (KPTS), KTWU/Channel 11 (KTWU), East Tennessee PBS (WSJK), WCTE-TV (WCTE), WLJT, Channel 11 (WLJT), WOSU TV (WOSU), WOUB/WOUC (WOUB), WVPB (WVPB), WKYU-PBS (WKYU), KERA 13 (KERA), MPBN (WCBB), Mountain Lake PBS (WCFE), NHPTV (WENH), Vermont PBS (WETK), witf (WITF), WQED Multimedia (WQED), WMHT Educational Telecommunications (WMHT), Q-TV (WDCQ), WTVS Detroit Public TV (WTVS), CMU Public Television (WCMU), WKAR-TV (WKAR), WNMU-TV Public TV 13 (WNMU), WDSE - WRPT (WDSE), WGTE TV (WGTE), Lakeland Public Television (KAWE), KMOS-TV - Channels 6.1, 6.2 and 6.3 (KMOS), MontanaPBS (KUSM), KRWG/Channel 22 (KRWG), KACV (KACV), KCOS/Channel 13 (KCOS), WCNY/Channel 24 (WCNY), WNED (WNED), WPBS (WPBS), WSKG Public TV (WSKG), WXXI (WXXI), WPSU (WPSU), WVIA Public Media Studios (WVIA), WTVI (WTVI), Western Reserve PBS (WNEO), WVIZ/PBS ideastream (WVIZ), KCTS 9 (KCTS), Basin PBS (KPBT), KUHT / Channel 8 (KUHT), KLRN (KLRN), KLRU (KLRU), WTJX Channel 12 (WTJX), WCVE PBS (WCVE), KBTC Public Television (KBTC)
- **PearVideo**
- **peer.tv**
- **PeerTube**
- **PeerTube:Playlist**
- **peloton**

@@ -873,6 +904,7 @@ # Supported sites
- **RadioJavan**
- **radiokapital**
- **radiokapital:show**
- **RadioZetPodcast**
- **radlive**
- **radlive:channel**
- **radlive:season**

@@ -897,7 +929,9 @@ # Supported sites
- **RedBullTV**
- **RedBullTVRrnContent**
- **Reddit**
- **RedditR**
- **RedGifs**
- **RedGifsSearch**: Redgifs search
- **RedGifsUser**: Redgifs user
- **RedTube**
- **RegioTV**
- **RENTV**

@@ -921,8 +955,10 @@ # Supported sites
- **rtl2:you**
- **rtl2:you:series**
- **RTP**
- **RTRFM**
- **RTS**: RTS.ch
- **rtve.es:alacarta**: RTVE a la carta
- **rtve.es:audio**: RTVE audio
- **rtve.es:infantil**: RTVE infantil
- **rtve.es:live**: RTVE.es live streams
- **rtve.es:television**

@@ -932,11 +968,12 @@ # Supported sites
- **RumbleChannel**
- **RumbleEmbed**
- **rutube**: Rutube videos
-- **rutube:channel**: Rutube channels
+- **rutube:channel**: Rutube channel
- **rutube:embed**: Rutube embedded videos
- **rutube:movie**: Rutube movies
- **rutube:person**: Rutube person videos
- **rutube:playlist**: Rutube playlists
- **rutube:tags**: Rutube tags
- **RUTV**: RUTV.RU
- **Ruutu**
- **Ruv**

@@ -960,6 +997,7 @@ # Supported sites
- **SCTE**
- **SCTECourse**
- **Seeker**
- **SenateGov**
- **SenateISVP**
- **SendtoNews**
- **Servus**

@@ -975,6 +1013,7 @@ # Supported sites
- **simplecast:episode**
- **simplecast:podcast**
- **Sina**
- **Skeb**
- **sky.it**
- **sky:news**
- **sky:news:story**

@@ -994,6 +1033,7 @@ # Supported sites
- **SonyLIVSeries**
- **soundcloud**
- **soundcloud:playlist**
- **soundcloud:related**
- **soundcloud:search**: Soundcloud search; "scsearch:" prefix
- **soundcloud:set**
- **soundcloud:trackstation**

@@ -1038,8 +1078,10 @@ # Supported sites
- **Streamanity**
- **streamcloud.eu**
- **StreamCZ**
- **StreamFF**
- **StreetVoice**
- **StretchInternet**
- **Stripchat**
- **stv:player**
- **SunPorno**
- **sverigesradio:episode**

@@ -1105,6 +1147,7 @@ # Supported sites
- **TNAFlix**
- **TNAFlixNetworkEmbed**
- **toggle**
- **toggo**
- **Tokentube**
- **Tokentube:channel**
- **ToonGoggles**

@@ -1117,6 +1160,7 @@ # Supported sites
- **TrovoChannelClip**: All Clips of a trovo.live channel; "trovoclip:" prefix
- **TrovoChannelVod**: All VODs of a trovo.live channel; "trovovod:" prefix
- **TrovoVod**
- **TrueID**
- **TruNews**
- **TruTV**
- **Tube8**

@@ -1311,6 +1355,7 @@ # Supported sites
- **WeiboMobile**
- **WeiqiTV**: WQTV
- **whowatch**
- **Willow**
- **WimTV**
- **Wistia**
- **WistiaPlaylist**

@@ -1370,8 +1415,8 @@ # Supported sites
- **youtube:history**: Youtube watch history; ":ythis" keyword (requires cookies)
- **youtube:playlist**: YouTube playlists
- **youtube:recommended**: YouTube recommended videos; ":ytrec" keyword
-- **youtube:search**: YouTube searches; "ytsearch:" prefix
-- **youtube:search:date**: YouTube searches, newest videos first; "ytsearchdate:" prefix
+- **youtube:search**: YouTube search; "ytsearch:" prefix
+- **youtube:search:date**: YouTube search, newest videos first; "ytsearchdate:" prefix
- **youtube:search_url**: YouTube search URLs with sorting and filter support
- **youtube:subscriptions**: YouTube subscriptions feed; ":ytsubs" keyword (requires cookies)
- **youtube:tab**: YouTube Tabs
@@ -194,6 +194,51 @@ def expect_dict(self, got_dict, expected_dict):
            expect_value(self, got, expected, info_field)


def sanitize_got_info_dict(got_dict):
    IGNORED_FIELDS = (
        # Format keys
        'url', 'manifest_url', 'format', 'format_id', 'format_note', 'width', 'height', 'resolution',
        'dynamic_range', 'tbr', 'abr', 'acodec', 'asr', 'vbr', 'fps', 'vcodec', 'container', 'filesize',
        'filesize_approx', 'player_url', 'protocol', 'fragment_base_url', 'fragments', 'preference',
        'language', 'language_preference', 'quality', 'source_preference', 'http_headers',
        'stretched_ratio', 'no_resume', 'has_drm', 'downloader_options',

        # RTMP formats
        'page_url', 'app', 'play_path', 'tc_url', 'flash_version', 'rtmp_live', 'rtmp_conn', 'rtmp_protocol', 'rtmp_real_time',

        # Lists
        'formats', 'thumbnails', 'subtitles', 'automatic_captions', 'comments', 'entries',

        # Auto-generated
        'autonumber', 'playlist', 'format_index', 'video_ext', 'audio_ext', 'duration_string', 'epoch',
        'fulltitle', 'extractor', 'extractor_key', 'filepath', 'infojson_filename', 'original_url',

        # Only live_status needs to be checked
        'is_live', 'was_live',
    )

    IGNORED_PREFIXES = ('', 'playlist', 'requested', 'webpage')

    def sanitize(key, value):
        if isinstance(value, str) and len(value) > 100:
            return f'md5:{md5(value)}'
        elif isinstance(value, list) and len(value) > 10:
            return f'count:{len(value)}'
        return value

    test_info_dict = {
        key: sanitize(key, value) for key, value in got_dict.items()
        if value is not None and key not in IGNORED_FIELDS and not any(
            key.startswith(f'{prefix}_') for prefix in IGNORED_PREFIXES)
    }

    # display_id may be generated from id
    if test_info_dict.get('display_id') == test_info_dict['id']:
        test_info_dict.pop('display_id')

    return test_info_dict


def expect_info_dict(self, got_dict, expected_dict):
    expect_dict(self, got_dict, expected_dict)
    # Check for the presence of mandatory fields
@@ -207,10 +252,8 @@ def expect_info_dict(self, got_dict, expected_dict):
    for key in ['webpage_url', 'extractor', 'extractor_key']:
        self.assertTrue(got_dict.get(key), 'Missing field: %s' % key)

    # Are checkable fields missing from the test case definition?
-    test_info_dict = dict((key, value if not isinstance(value, compat_str) or len(value) < 250 else 'md5:' + md5(value))
-                          for key, value in got_dict.items()
-                          if value and key in ('id', 'title', 'description', 'uploader', 'upload_date', 'timestamp', 'uploader_id', 'location', 'age_limit'))
+    test_info_dict = sanitize_got_info_dict(got_dict)

    missing_keys = set(test_info_dict.keys()) - set(expected_dict.keys())
    if missing_keys:
        def _repr(v):
@@ -99,10 +99,10 @@ def test_html_search_meta(self):
        self.assertRaises(RegexNotFoundError, ie._html_search_meta, ('z', 'x'), html, None, fatal=True)

    def test_search_json_ld_realworld(self):
-        # https://github.com/ytdl-org/youtube-dl/issues/23306
-        expect_dict(
-            self,
-            self.ie._search_json_ld(r'''<script type="application/ld+json">
+        _TESTS = [
+            # https://github.com/ytdl-org/youtube-dl/issues/23306
+            (
+                r'''<script type="application/ld+json">
            {
              "@context": "http://schema.org/",
              "@type": "VideoObject",
@@ -135,17 +135,86 @@ def test_search_json_ld_realworld(self):
                "name": "Kleio Valentien",
                "url": "https://www.eporner.com/pornstar/kleio-valentien/"
            }]}
-            </script>''', None),
-            {
-                'title': '1 On 1 With Kleio',
-                'description': 'Kleio Valentien',
-                'url': 'https://gvideo.eporner.com/xN49A1cT3eB/xN49A1cT3eB.mp4',
-                'timestamp': 1449347075,
-                'duration': 743.0,
-                'view_count': 1120958,
-                'width': 1920,
-                'height': 1080,
-            })
+                </script>''',
+                {
+                    'title': '1 On 1 With Kleio',
+                    'description': 'Kleio Valentien',
+                    'url': 'https://gvideo.eporner.com/xN49A1cT3eB/xN49A1cT3eB.mp4',
+                    'timestamp': 1449347075,
+                    'duration': 743.0,
+                    'view_count': 1120958,
+                    'width': 1920,
+                    'height': 1080,
+                },
+                {},
+            ),
            (
                r'''<script type="application/ld+json">
            {
              "@context": "https://schema.org",
              "@graph": [
                {
                  "@type": "NewsArticle",
                  "mainEntityOfPage": {
                    "@type": "WebPage",
                    "@id": "https://www.ant1news.gr/Society/article/620286/symmoria-anilikon-dikigoros-thymaton-ithelan-na-toys-apoteleiosoyn"
                  },
                  "headline": "Συμμορία ανηλίκων – δικηγόρος θυμάτων: ήθελαν να τους αποτελειώσουν",
                  "name": "Συμμορία ανηλίκων – δικηγόρος θυμάτων: ήθελαν να τους αποτελειώσουν",
                  "description": "Τα παιδιά δέχθηκαν την επίθεση επειδή αρνήθηκαν να γίνουν μέλη της συμμορίας, ανέφερε ο Γ. Ζαχαρόπουλος.",
                  "image": {
                    "@type": "ImageObject",
                    "url": "https://ant1media.azureedge.net/imgHandler/1100/a635c968-be71-447c-bf9c-80d843ece21e.jpg",
                    "width": 1100,
                    "height": 756 },
                  "datePublished": "2021-11-10T08:50:00+03:00",
                  "dateModified": "2021-11-10T08:52:53+03:00",
                  "author": {
                    "@type": "Person",
                    "@id": "https://www.ant1news.gr/",
                    "name": "Ant1news",
                    "image": "https://www.ant1news.gr/images/logo-e5d7e4b3e714c88e8d2eca96130142f6.png",
                    "url": "https://www.ant1news.gr/"
                  },
                  "publisher": {
                    "@type": "Organization",
                    "@id": "https://www.ant1news.gr#publisher",
                    "name": "Ant1news",
                    "url": "https://www.ant1news.gr",
                    "logo": {
                      "@type": "ImageObject",
                      "url": "https://www.ant1news.gr/images/logo-e5d7e4b3e714c88e8d2eca96130142f6.png",
                      "width": 400,
                      "height": 400 },
                    "sameAs": [
                      "https://www.facebook.com/Ant1news.gr",
                      "https://twitter.com/antennanews",
                      "https://www.youtube.com/channel/UC0smvAbfczoN75dP0Hw4Pzw",
                      "https://www.instagram.com/ant1news/"
                    ]
                  },

                  "keywords": "μαχαίρωμα,συμμορία ανηλίκων,ΕΙΔΗΣΕΙΣ,ΕΙΔΗΣΕΙΣ ΣΗΜΕΡΑ,ΝΕΑ,Κοινωνία - Ant1news",


                  "articleSection": "Κοινωνία"
                }
              ]
            }
                </script>''',
                {
                    'timestamp': 1636523400,
                    'title': 'md5:91fe569e952e4d146485740ae927662b',
                },
                {'expected_type': 'NewsArticle'},
            ),
        ]
        for html, expected_dict, search_json_ld_kwargs in _TESTS:
            expect_dict(
                self,
                self.ie._search_json_ld(html, None, **search_json_ld_kwargs),
                expected_dict
            )

    def test_download_json(self):
        uri = encode_data_uri(b'{"foo": "blah"}', 'application/json')
@@ -137,7 +137,7 @@ def test(inp, *expected, multi=False):
        test('webm/mp4', '47')
        test('3gp/40/mp4', '35')
        test('example-with-dashes', 'example-with-dashes')
-        test('all', '35', 'example-with-dashes', '45', '47', '2')  # Order doesn't actually matter for this
+        test('all', '2', '47', '45', 'example-with-dashes', '35')
        test('mergeall', '2+47+45+example-with-dashes+35', multi=True)

    def test_format_selection_audio(self):
@@ -520,7 +520,7 @@ def test_format_filtering(self):
        ydl = YDL({'format': 'all[width>=400][width<=600]'})
        ydl.process_ie_result(info_dict)
        downloaded_ids = [info['format_id'] for info in ydl.downloaded_info_dicts]
-        self.assertEqual(downloaded_ids, ['B', 'C', 'D'])
+        self.assertEqual(downloaded_ids, ['D', 'C', 'B'])

        ydl = YDL({'format': 'best[height<40]'})
        try:
@@ -717,6 +717,7 @@ def test(tmpl, expected, *, info=None, **params):
        test('%(id)s', '.abcd', info={'id': '.abcd'})
        test('%(id)s', 'ab__cd', info={'id': 'ab__cd'})
        test('%(id)s', ('ab:cd', 'ab -cd'), info={'id': 'ab:cd'})
        test('%(id.0)s', '-', info={'id': '--'})

        # Invalid templates
        self.assertTrue(isinstance(YoutubeDL.validate_outtmpl('%(title)'), ValueError))
@@ -777,6 +778,10 @@ def expect_same_infodict(out):
        test('%(title5)#U', 'a\u0301e\u0301i\u0301 𝐀')
        test('%(title5)+U', 'áéí A')
        test('%(title5)+#U', 'a\u0301e\u0301i\u0301 A')
        test('%(height)D', '1K')
        test('%(height)5.2D', ' 1.08K')
        test('%(title4)#S', 'foo_bar_test')
        test('%(title4).10S', ('foo \'bar\' ', 'foo \'bar\'' + ('#' if compat_os_name == 'nt' else ' ')))
        if compat_os_name == 'nt':
            test('%(title4)q', ('"foo \\"bar\\" test"', "'foo _'bar_' test'"))
            test('%(formats.:.id)#q', ('"id 1" "id 2" "id 3"', "'id 1' 'id 2' 'id 3'"))

@@ -808,6 +813,11 @@ def expect_same_infodict(out):
        test('%(width-100,height+width|def)s', 'def')
        test('%(timestamp-x>%H\\,%M\\,%S,timestamp>%H\\,%M\\,%S)s', '12,00,00')

        # Replacement
        test('%(id&foo)s.bar', 'foo.bar')
        test('%(title&foo)s.bar', 'NA.bar')
        test('%(title&foo|baz)s.bar', 'baz.bar')

        # Laziness
        def gen():
            yield from range(5)
@@ -10,6 +10,8 @@
from yt_dlp.aes import (
    aes_decrypt,
    aes_encrypt,
    aes_ecb_encrypt,
    aes_ecb_decrypt,
    aes_cbc_decrypt,
    aes_cbc_decrypt_bytes,
    aes_cbc_encrypt,
@@ -17,7 +19,8 @@
    aes_ctr_encrypt,
    aes_gcm_decrypt_and_verify,
    aes_gcm_decrypt_and_verify_bytes,
-    aes_decrypt_text
+    aes_decrypt_text,
+    BLOCK_SIZE_BYTES,
)
from yt_dlp.compat import compat_pycrypto_AES
from yt_dlp.utils import bytes_to_intlist, intlist_to_bytes
@@ -94,6 +97,19 @@ def test_decrypt_text(self):
        decrypted = (aes_decrypt_text(encrypted, password, 32))
        self.assertEqual(decrypted, self.secret_msg)

    def test_ecb_encrypt(self):
        data = bytes_to_intlist(self.secret_msg)
        data += [0x08] * (BLOCK_SIZE_BYTES - len(data) % BLOCK_SIZE_BYTES)
        encrypted = intlist_to_bytes(aes_ecb_encrypt(data, self.key, self.iv))
        self.assertEqual(
            encrypted,
            b'\xaa\x86]\x81\x97>\x02\x92\x9d\x1bR[[L/u\xd3&\xd1(h\xde{\x81\x94\xba\x02\xae\xbd\xa6\xd0:')

    def test_ecb_decrypt(self):
        data = bytes_to_intlist(b'\xaa\x86]\x81\x97>\x02\x92\x9d\x1bR[[L/u\xd3&\xd1(h\xde{\x81\x94\xba\x02\xae\xbd\xa6\xd0:')
        decrypted = intlist_to_bytes(aes_ecb_decrypt(data, self.key, self.iv))
        self.assertEqual(decrypted.rstrip(b'\x08'), self.secret_msg)


if __name__ == '__main__':
    unittest.main()
@@ -38,7 +38,6 @@ def test_youtube_playlist_matching(self):
        assertTab('https://www.youtube.com/AsapSCIENCE')
        assertTab('https://www.youtube.com/embedded')
        assertTab('https://www.youtube.com/playlist?list=UUBABnxM4Ar9ten8Mdjj1j0Q')
-        assertTab('https://www.youtube.com/course?list=ECUl4u3cNGP61MdtwGTqZA0MreSaDybji8')
        assertTab('https://www.youtube.com/playlist?list=PLwP_SiAcdui0KVebT0mU9Apz359a4ubsC')
        assertTab('https://www.youtube.com/watch?v=AV6J6_AeFEQ&playnext=1&list=PL4023E734DA416012')  # 668
        self.assertFalse('youtube:playlist' in self.matching_ies('PLtS2H6bU1M'))
@@ -124,11 +124,11 @@ def test_remove_marked_arrange_sponsors_ChapterWithSponsors(self):
        chapters = self._chapters([70], ['c']) + [
            self._sponsor_chapter(10, 20, 'sponsor'),
            self._sponsor_chapter(30, 40, 'preview'),
-            self._sponsor_chapter(50, 60, 'sponsor')]
+            self._sponsor_chapter(50, 60, 'filler')]
        expected = self._chapters(
            [10, 20, 30, 40, 50, 60, 70],
            ['c', '[SponsorBlock]: Sponsor', 'c', '[SponsorBlock]: Preview/Recap',
-             'c', '[SponsorBlock]: Sponsor', 'c'])
+             'c', '[SponsorBlock]: Filler Tangent', 'c'])
        self._remove_marked_arrange_sponsors_test_impl(chapters, expected, [])

    def test_remove_marked_arrange_sponsors_UniqueNamesForOverlappingSponsors(self):
@@ -1156,9 +1156,16 @@ def test_parse_count(self):
        self.assertEqual(parse_count('1000'), 1000)
        self.assertEqual(parse_count('1.000'), 1000)
        self.assertEqual(parse_count('1.1k'), 1100)
        self.assertEqual(parse_count('1.1 k'), 1100)
        self.assertEqual(parse_count('1,1 k'), 1100)
        self.assertEqual(parse_count('1.1kk'), 1100000)
        self.assertEqual(parse_count('1.1kk '), 1100000)
        self.assertEqual(parse_count('1,1kk'), 1100000)
        self.assertEqual(parse_count('100 views'), 100)
        self.assertEqual(parse_count('1,100 views'), 1100)
        self.assertEqual(parse_count('1.1kk views'), 1100000)
        self.assertEqual(parse_count('10M views'), 10000000)
        self.assertEqual(parse_count('has 10M views'), 10000000)

    def test_parse_resolution(self):
        self.assertEqual(parse_resolution(None), {})
@@ -1222,12 +1229,49 @@ def test_is_html(self):
    def test_render_table(self):
        self.assertEqual(
            render_table(
-                ['a', 'bcd'],
-                [[123, 4], [9999, 51]]),
+                ['a', 'empty', 'bcd'],
+                [[123, '', 4], [9999, '', 51]]),
            'a empty bcd\n'
            '123 4\n'
            '9999 51')

        self.assertEqual(
            render_table(
                ['a', 'empty', 'bcd'],
                [[123, '', 4], [9999, '', 51]],
                hide_empty=True),
            'a bcd\n'
            '123 4\n'
            '9999 51')

        self.assertEqual(
            render_table(
                ['\ta', 'bcd'],
                [['1\t23', 4], ['\t9999', 51]]),
            ' a bcd\n'
            '1 23 4\n'
            '9999 51')

        self.assertEqual(
            render_table(
                ['a', 'bcd'],
                [[123, 4], [9999, 51]],
                delim='-'),
            'a bcd\n'
            '--------\n'
            '123 4\n'
            '9999 51')

        self.assertEqual(
            render_table(
                ['a', 'bcd'],
                [[123, 4], [9999, 51]],
                delim='-', extra_gap=2),
            'a bcd\n'
            '----------\n'
            '123 4\n'
            '9999 51')

    def test_match_str(self):
        # Unary
        self.assertFalse(match_str('xy', {'x': 1200}))
@@ -1620,9 +1664,9 @@ def test_LazyList(self):
        self.assertEqual(repr(LazyList(it)), repr(it))
        self.assertEqual(str(LazyList(it)), str(it))

-        self.assertEqual(list(LazyList(it).reverse()), it[::-1])
-        self.assertEqual(list(LazyList(it).reverse()[1:3:7]), it[::-1][1:3:7])
-        self.assertEqual(list(LazyList(it).reverse()[::-1]), it)
+        self.assertEqual(list(LazyList(it, reverse=True)), it[::-1])
+        self.assertEqual(list(reversed(LazyList(it))[::-1]), it)
+        self.assertEqual(list(reversed(LazyList(it))[1:3:7]), it[::-1][1:3:7])

    def test_LazyList_laziness(self):

@@ -1635,13 +1679,13 @@ def test(ll, idx, val, cache):
        test(ll, 5, 5, range(6))
        test(ll, -3, 7, range(10))

-        ll = LazyList(range(10)).reverse()
+        ll = LazyList(range(10), reverse=True)
        test(ll, -1, 0, range(1))
        test(ll, 3, 6, range(10))

        ll = LazyList(itertools.count())
        test(ll, 10, 10, range(11))
-        ll.reverse()
+        ll = reversed(ll)
        test(ll, -15, 14, range(15))
@@ -26,29 +26,31 @@ def assertIsPlaylist(self, info):
    def test_youtube_playlist_noplaylist(self):
        dl = FakeYDL()
        dl.params['noplaylist'] = True
-        ie = YoutubePlaylistIE(dl)
+        ie = YoutubeTabIE(dl)
        result = ie.extract('https://www.youtube.com/watch?v=FXxLjLQi3Fg&list=PLwiyx1dc3P2JR9N8gQaQN_BCvlSlap7re')
        self.assertEqual(result['_type'], 'url')
-        self.assertEqual(YoutubeIE().extract_id(result['url']), 'FXxLjLQi3Fg')
+        self.assertEqual(YoutubeIE.extract_id(result['url']), 'FXxLjLQi3Fg')

    def test_youtube_course(self):
+        print('Skipping: Course URLs no longer exists')
+        return
        dl = FakeYDL()
        ie = YoutubePlaylistIE(dl)
        # TODO find a > 100 (paginating?) videos course
        result = ie.extract('https://www.youtube.com/course?list=ECUl4u3cNGP61MdtwGTqZA0MreSaDybji8')
        entries = list(result['entries'])
-        self.assertEqual(YoutubeIE().extract_id(entries[0]['url']), 'j9WZyLZCBzs')
+        self.assertEqual(YoutubeIE.extract_id(entries[0]['url']), 'j9WZyLZCBzs')
        self.assertEqual(len(entries), 25)
-        self.assertEqual(YoutubeIE().extract_id(entries[-1]['url']), 'rYefUsYuEp0')
+        self.assertEqual(YoutubeIE.extract_id(entries[-1]['url']), 'rYefUsYuEp0')

    def test_youtube_mix(self):
        dl = FakeYDL()
-        ie = YoutubePlaylistIE(dl)
-        result = ie.extract('https://www.youtube.com/watch?v=W01L70IGBgE&index=2&list=RDOQpdSVF_k_w')
-        entries = result['entries']
+        ie = YoutubeTabIE(dl)
+        result = ie.extract('https://www.youtube.com/watch?v=tyITL_exICo&list=RDCLAK5uy_kLWIr9gv1XLlPbaDS965-Db4TrBoUTxQ8')
+        entries = list(result['entries'])
        self.assertTrue(len(entries) >= 50)
        original_video = entries[0]
-        self.assertEqual(original_video['id'], 'OQpdSVF_k_w')
+        self.assertEqual(original_video['id'], 'tyITL_exICo')

    def test_youtube_toptracks(self):
        print('Skipping: The playlist page gives error 500')

@@ -68,10 +70,10 @@ def test_youtube_flat_playlist_extraction(self):
        entries = list(result['entries'])
        self.assertTrue(len(entries) == 1)
        video = entries[0]
-        self.assertEqual(video['_type'], 'url_transparent')
+        self.assertEqual(video['_type'], 'url')
        self.assertEqual(video['ie_key'], 'Youtube')
        self.assertEqual(video['id'], 'BaW_jenozKc')
-        self.assertEqual(video['url'], 'BaW_jenozKc')
+        self.assertEqual(video['url'], 'https://www.youtube.com/watch?v=BaW_jenozKc')
        self.assertEqual(video['title'], 'youtube-dl test video "\'/\\ä↭𝕐')
        self.assertEqual(video['duration'], 10)
        self.assertEqual(video['uploader'], 'Philipp Hagemeister')
@@ -74,6 +74,18 @@
        'https://www.youtube.com/s/player/f8cb7a3b/player_ias.vflset/en_US/base.js',
        'oBo2h5euWy6osrUt', 'ivXHpm7qJjJN',
    ),
    (
        'https://www.youtube.com/s/player/2dfe380c/player_ias.vflset/en_US/base.js',
        'oBo2h5euWy6osrUt', '3DIBbn3qdQ',
    ),
    (
        'https://www.youtube.com/s/player/f1ca6900/player_ias.vflset/en_US/base.js',
        'cu3wyu6LQn2hse', 'jvxetvmlI9AN9Q',
    ),
    (
        'https://www.youtube.com/s/player/8040e515/player_ias.vflset/en_US/base.js',
        'wvOFaY-yjgDuIEg5', 'HkfBFDHmgw4rsw',
    ),
]
File diff suppressed because it is too large
@@ -18,6 +18,7 @@
)
from .compat import (
    compat_getpass,
    compat_os_name,
    compat_shlex_quote,
    workaround_optparse_bug9161,
)
@@ -25,18 +26,17 @@
from .utils import (
    DateRange,
    decodeOption,
    DownloadCancelled,
    DownloadError,
    error_to_compat_str,
    ExistingVideoReached,
    expand_path,
    GeoUtils,
    float_or_none,
    int_or_none,
    match_filter_func,
    MaxDownloadsReached,
    parse_duration,
    preferredencoding,
    read_batch_urls,
    RejectedVideoReached,
    render_table,
    SameFileError,
    setproctitle,
@@ -73,7 +73,7 @@ def _real_main(argv=None):
    setproctitle('yt-dlp')

    parser, opts, args = parseOpts(argv)
-    warnings = []
+    warnings, deprecation_warnings = [], []

    # Set user agent
    if opts.user_agent is not None:

@@ -96,6 +96,8 @@ def _real_main(argv=None):
    if opts.batchfile is not None:
        try:
            if opts.batchfile == '-':
                write_string('Reading URLs from stdin - EOF (%s) to end:\n' % (
                    'Ctrl+Z' if compat_os_name == 'nt' else 'Ctrl+D'))
                batchfd = sys.stdin
            else:
                batchfd = io.open(

@@ -136,6 +138,11 @@ def _real_main(argv=None):
        sys.exit(0)

    # Conflicting, missing and erroneous options
    if opts.format == 'best':
        warnings.append('.\n '.join((
            '"-f best" selects the best pre-merged format which is often not the best option',
            'To let yt-dlp download and merge the best available formats, simply do not pass any format selection',
            'If you know what you are doing and want only the best pre-merged format, use "-f b" instead to suppress this warning')))
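In CLI terms, the warning above amounts to the following (a sketch; the URL is a placeholder):

```bash
# Let yt-dlp pick and merge the best available formats (no -f needed)
$ yt-dlp "https://www.youtube.com/watch?v=XXXX"
# Or explicitly request the best pre-merged format to suppress the warning
$ yt-dlp -f b "https://www.youtube.com/watch?v=XXXX"
```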
if opts.usenetrc and (opts.username is not None or opts.password is not None):
|
||||
parser.error('using .netrc conflicts with giving username/password')
|
||||
if opts.password is not None and opts.username is None:
|
||||
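The new warning nudges users away from '-f best' toward the default merge behaviour. For reference, the distinction on the command line (real yt-dlp options):

    yt-dlp URL          # default: downloads bestvideo+bestaudio and merges them
    yt-dlp -f b URL     # best pre-merged format only, without the warning
    yt-dlp -f best URL  # same selection as '-f b', but now emits the warning above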
@@ -195,7 +202,14 @@ def _real_main(argv=None):
    if opts.overwrites:  # --yes-overwrites implies --no-continue
        opts.continue_dl = False
    if opts.concurrent_fragment_downloads <= 0:
        raise ValueError('Concurrent fragments must be positive')
        parser.error('Concurrent fragments must be positive')
    if opts.wait_for_video is not None:
        min_wait, max_wait, *_ = map(parse_duration, opts.wait_for_video.split('-', 1) + [None])
        if min_wait is None or (max_wait is None and '-' in opts.wait_for_video):
            parser.error('Invalid time range to wait')
        elif max_wait is not None and max_wait < min_wait:
            parser.error('Minimum time range to wait must not be longer than the maximum')
        opts.wait_for_video = (min_wait, max_wait)

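A quick illustration of the `--wait-for-video` parsing above (`parse_duration` is yt-dlp's existing helper; it accepts plain seconds as well as clock-style values, and returns None for None input, which is what the appended `[None]` relies on):

    from yt_dlp.utils import parse_duration

    min_wait, max_wait, *_ = map(parse_duration, '30-1:00'.split('-', 1) + [None])
    # min_wait -> 30.0, max_wait -> 60.0
    min_wait, max_wait, *_ = map(parse_duration, '45'.split('-', 1) + [None])
    # min_wait -> 45.0, max_wait -> None (no upper bound given)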
    def parse_retries(retries, name=''):
        if retries in ('inf', 'infinite'):
@@ -208,6 +222,8 @@ def parse_retries(retries, name=''):
        return parsed_retries
    if opts.retries is not None:
        opts.retries = parse_retries(opts.retries)
    if opts.file_access_retries is not None:
        opts.file_access_retries = parse_retries(opts.file_access_retries, 'file access ')
    if opts.fragment_retries is not None:
        opts.fragment_retries = parse_retries(opts.fragment_retries, 'fragment ')
    if opts.extractor_retries is not None:
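The same parser now also covers the new `--file-access-retries` option. Behaviour sketch, based on the excerpt above ('inf'/'infinite' map to unbounded retries, anything else is parsed as an integer):

    parse_retries('inf')       # -> float('inf')
    parse_retries('infinite')  # -> float('inf')
    parse_retries('10')        # -> 10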
@@ -223,9 +239,9 @@ def parse_retries(retries, name=''):
            parser.error('invalid http chunk size specified')
        opts.http_chunk_size = numeric_chunksize
    if opts.playliststart <= 0:
        raise ValueError('Playlist start must be positive')
        raise parser.error('Playlist start must be positive')
    if opts.playlistend not in (-1, None) and opts.playlistend < opts.playliststart:
        raise ValueError('Playlist end must be greater than playlist start')
        raise parser.error('Playlist end must be greater than playlist start')
    if opts.extractaudio:
        opts.audioformat = opts.audioformat.lower()
        if opts.audioformat not in ['best'] + list(FFmpegExtractAudioPP.SUPPORTED_EXTS):
@@ -249,12 +265,17 @@ def parse_retries(retries, name=''):
    if opts.convertthumbnails is not None:
        if opts.convertthumbnails not in FFmpegThumbnailsConvertorPP.SUPPORTED_EXTS:
            parser.error('invalid thumbnail format specified')

    if opts.cookiesfrombrowser is not None:
        opts.cookiesfrombrowser = [
            part.strip() or None for part in opts.cookiesfrombrowser.split(':', 1)]
        if opts.cookiesfrombrowser[0].lower() not in SUPPORTED_BROWSERS:
            parser.error('unsupported browser specified for cookies')
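The value of `--cookies-from-browser` is split once on ':' into browser and optional profile, with empty parts becoming None. Worked example of the comprehension above:

    val = 'firefox:My Profile'
    parts = [part.strip() or None for part in val.split(':', 1)]
    # parts == ['firefox', 'My Profile']
    parts = [part.strip() or None for part in 'chrome'.split(':', 1)]
    # parts == ['chrome']  (no profile given)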
    geo_bypass_code = opts.geo_bypass_ip_block or opts.geo_bypass_country
    if geo_bypass_code is not None:
        try:
            GeoUtils.random_ipv4(geo_bypass_code)
        except Exception:
            parser.error('unsupported geo-bypass country or ip-block')

    if opts.date is not None:
        date = DateRange.day(opts.date)
@@ -290,6 +311,11 @@ def set_default_compat(compat_name, opt_name, default=True, remove_compat=True):
    set_default_compat('abort-on-error', 'ignoreerrors', 'only_download')
    set_default_compat('no-playlist-metafiles', 'allow_playlist_files')
    set_default_compat('no-clean-infojson', 'clean_infojson')
    if 'no-attach-info-json' in compat_opts:
        if opts.embed_infojson:
            _unused_compat_opt('no-attach-info-json')
        else:
            opts.embed_infojson = False
    if 'format-sort' in compat_opts:
        opts.format_sort.extend(InfoExtractor.FormatSort.ytdl_default)
    _video_multistreams_set = set_default_compat('multistreams', 'allow_multiple_video_streams', False, remove_compat=False)
@@ -373,8 +399,6 @@ def metadataparser_actions(f):
        opts.sponsorblock_remove = set()
    sponsorblock_query = opts.sponsorblock_mark | opts.sponsorblock_remove

    if (opts.addmetadata or opts.sponsorblock_mark) and opts.addchapters is None:
        opts.addchapters = True
    opts.remove_chapters = opts.remove_chapters or []

    if (opts.remove_chapters or sponsorblock_query) and opts.sponskrub is not False:
@@ -395,40 +419,32 @@ def metadataparser_actions(f):
        opts.remuxvideo = False

    if opts.allow_unplayable_formats:
        if opts.extractaudio:
            report_conflict('--allow-unplayable-formats', '--extract-audio')
            opts.extractaudio = False
        if opts.remuxvideo:
            report_conflict('--allow-unplayable-formats', '--remux-video')
            opts.remuxvideo = False
        if opts.recodevideo:
            report_conflict('--allow-unplayable-formats', '--recode-video')
            opts.recodevideo = False
        if opts.addmetadata:
            report_conflict('--allow-unplayable-formats', '--add-metadata')
            opts.addmetadata = False
        if opts.embedsubtitles:
            report_conflict('--allow-unplayable-formats', '--embed-subs')
            opts.embedsubtitles = False
        if opts.embedthumbnail:
            report_conflict('--allow-unplayable-formats', '--embed-thumbnail')
            opts.embedthumbnail = False
        if opts.xattrs:
            report_conflict('--allow-unplayable-formats', '--xattrs')
            opts.xattrs = False
        if opts.fixup and opts.fixup.lower() not in ('never', 'ignore'):
            report_conflict('--allow-unplayable-formats', '--fixup')
        def report_unplayable_conflict(opt_name, arg, default=False, allowed=None):
            val = getattr(opts, opt_name)
            if (not allowed and val) or (allowed and not allowed(val)):
                report_conflict('--allow-unplayable-formats', arg)
                setattr(opts, opt_name, default)

        report_unplayable_conflict('extractaudio', '--extract-audio')
        report_unplayable_conflict('remuxvideo', '--remux-video')
        report_unplayable_conflict('recodevideo', '--recode-video')
        report_unplayable_conflict('addmetadata', '--embed-metadata')
        report_unplayable_conflict('addchapters', '--embed-chapters')
        report_unplayable_conflict('embed_infojson', '--embed-info-json')
        opts.embed_infojson = False
        report_unplayable_conflict('embedsubtitles', '--embed-subs')
        report_unplayable_conflict('embedthumbnail', '--embed-thumbnail')
        report_unplayable_conflict('xattrs', '--xattrs')
        report_unplayable_conflict('fixup', '--fixup', default='never', allowed=lambda x: x in (None, 'never', 'ignore'))
        opts.fixup = 'never'
        if opts.remove_chapters:
            report_conflict('--allow-unplayable-formats', '--remove-chapters')
            opts.remove_chapters = []
        if opts.sponsorblock_remove:
            report_conflict('--allow-unplayable-formats', '--sponsorblock-remove')
            opts.sponsorblock_remove = set()
        if opts.sponskrub:
            report_conflict('--allow-unplayable-formats', '--sponskrub')
        report_unplayable_conflict('remove_chapters', '--remove-chapters', default=[])
        report_unplayable_conflict('sponsorblock_remove', '--sponsorblock-remove', default=set())
        report_unplayable_conflict('sponskrub', '--sponskrub', default=set())
        opts.sponskrub = False

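The repeated if/report/reset triples are collapsed into the single `report_unplayable_conflict` helper. Behaviour sketch for one call, stated as comments:

    # With --allow-unplayable-formats --embed-subs:
    # report_unplayable_conflict('embedsubtitles', '--embed-subs')
    #   -> opts.embedsubtitles is truthy and no `allowed` predicate is given,
    #      so the conflict is reported and the option is reset to its default (False),
    #      exactly what the removed per-option branches did.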
    if (opts.addmetadata or opts.sponsorblock_mark) and opts.addchapters is None:
        opts.addchapters = True

    # PostProcessors
    postprocessors = list(opts.add_postprocessors)
    if sponsorblock_query:
@@ -506,7 +522,7 @@ def metadataparser_actions(f):
            if len(dur) == 2 and all(t is not None for t in dur):
                remove_ranges.append(tuple(dur))
                continue
            parser.error(f'invalid --remove-chapters time range {regex!r}. Must be of the form ?start-end')
            parser.error(f'invalid --remove-chapters time range {regex!r}. Must be of the form *start-end')
        try:
            remove_chapters_patterns.append(re.compile(regex))
        except re.error as err:
@@ -526,13 +542,16 @@ def metadataparser_actions(f):
    # By default ffmpeg preserves metadata applicable for both
    # source and target containers. From this point the container won't change,
    # so metadata can be added here.
    if opts.addmetadata or opts.addchapters:
    if opts.addmetadata or opts.addchapters or opts.embed_infojson:
        if opts.embed_infojson is None:
            opts.embed_infojson = 'if_exists'
        postprocessors.append({
            'key': 'FFmpegMetadata',
            'add_chapters': opts.addchapters,
            'add_metadata': opts.addmetadata,
            'add_infojson': opts.embed_infojson,
        })
    # Note: Deprecated
    # Deprecated
    # This should be above EmbedThumbnail since sponskrub removes the thumbnail attachment
    # but must be below EmbedSubtitle and FFmpegMetadata
    # See https://github.com/yt-dlp/yt-dlp/issues/204 , https://github.com/faissaloo/SponSkrub/issues/29
@@ -545,15 +564,15 @@ def metadataparser_actions(f):
            'cut': opts.sponskrub_cut,
            'force': opts.sponskrub_force,
            'ignoreerror': opts.sponskrub is None,
            '_from_cli': True,
        })
    if opts.embedthumbnail:
        already_have_thumbnail = opts.writethumbnail or opts.write_all_thumbnails
        postprocessors.append({
            'key': 'EmbedThumbnail',
            # already_have_thumbnail = True prevents the file from being deleted after embedding
            'already_have_thumbnail': already_have_thumbnail
            'already_have_thumbnail': opts.writethumbnail
        })
        if not already_have_thumbnail:
        if not opts.writethumbnail:
            opts.writethumbnail = True
            opts.outtmpl['pl_thumbnail'] = ''
    if opts.split_chapters:
@@ -584,6 +603,19 @@ def report_args_compat(arg, name):
        opts.postprocessor_args.setdefault('sponskrub', [])
        opts.postprocessor_args['default'] = opts.postprocessor_args['default-compat']

    def report_deprecation(val, old, new=None):
        if not val:
            return
        deprecation_warnings.append(
            f'{old} is deprecated and may be removed in a future version. Use {new} instead' if new
            else f'{old} is deprecated and may not work as expected')

    report_deprecation(opts.sponskrub, '--sponskrub', '--sponsorblock-mark or --sponsorblock-remove')
    report_deprecation(not opts.prefer_ffmpeg, '--prefer-avconv', 'ffmpeg')
    report_deprecation(opts.include_ads, '--include-ads')
    # report_deprecation(opts.call_home, '--call-home')  # We may re-implement this in future
    # report_deprecation(opts.writeannotations, '--write-annotations')  # It's just that no website has it

    final_ext = (
        opts.recodevideo if opts.recodevideo in FFmpegVideoConvertorPP.SUPPORTED_EXTS
        else opts.remuxvideo if opts.remuxvideo in FFmpegVideoRemuxerPP.SUPPORTED_EXTS
@@ -643,6 +675,7 @@ def report_args_compat(arg, name):
        'throttledratelimit': opts.throttledratelimit,
        'overwrites': opts.overwrites,
        'retries': opts.retries,
        'file_access_retries': opts.file_access_retries,
        'fragment_retries': opts.fragment_retries,
        'extractor_retries': opts.extractor_retries,
        'skip_unavailable_fragments': opts.skip_unavailable_fragments,
@@ -670,8 +703,8 @@ def report_args_compat(arg, name):
        'allow_playlist_files': opts.allow_playlist_files,
        'clean_infojson': opts.clean_infojson,
        'getcomments': opts.getcomments,
        'writethumbnail': opts.writethumbnail,
        'write_all_thumbnails': opts.write_all_thumbnails,
        'writethumbnail': opts.writethumbnail is True,
        'write_all_thumbnails': opts.writethumbnail == 'all',
        'writelink': opts.writelink,
        'writeurllink': opts.writeurllink,
        'writewebloclink': opts.writewebloclink,
@@ -703,6 +736,7 @@ def report_args_compat(arg, name):
        'download_archive': download_archive_fn,
        'break_on_existing': opts.break_on_existing,
        'break_on_reject': opts.break_on_reject,
        'break_per_url': opts.break_per_url,
        'skip_playlist_after_errors': opts.skip_playlist_after_errors,
        'cookiefile': opts.cookiefile,
        'cookiesfrombrowser': opts.cookiesfrombrowser,
@@ -721,6 +755,8 @@ def report_args_compat(arg, name):
        'youtube_include_hls_manifest': opts.youtube_include_hls_manifest,
        'encoding': opts.encoding,
        'extract_flat': opts.extract_flat,
        'live_from_start': opts.live_from_start,
        'wait_for_video': opts.wait_for_video,
        'mark_watched': opts.mark_watched,
        'merge_output_format': opts.merge_output_format,
        'final_ext': final_ext,
@@ -750,11 +786,12 @@ def report_args_compat(arg, name):
        'geo_bypass_country': opts.geo_bypass_country,
        'geo_bypass_ip_block': opts.geo_bypass_ip_block,
        '_warnings': warnings,
        '_deprecation_warnings': deprecation_warnings,
        'compat_opts': compat_opts,
    }

    with YoutubeDL(ydl_opts) as ydl:
        actual_use = len(all_urls) or opts.load_info_filename
        actual_use = all_urls or opts.load_info_filename

        # Remove cache dir
        if opts.rm_cachedir:
@@ -783,7 +820,7 @@ def report_args_compat(arg, name):
                retcode = ydl.download_with_info_file(expand_path(opts.load_info_filename))
            else:
                retcode = ydl.download(all_urls)
        except (MaxDownloadsReached, ExistingVideoReached, RejectedVideoReached):
        except DownloadCancelled:
            ydl.to_screen('Aborting remaining downloads')
            retcode = 101

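Catching the single `DownloadCancelled` works because the three previously listed exceptions are subclasses of it in yt_dlp.utils. The sketch below states that assumption explicitly:

    from yt_dlp.utils import (
        DownloadCancelled, ExistingVideoReached, MaxDownloadsReached, RejectedVideoReached)

    # All three cancellation conditions share the new common base class
    assert all(issubclass(exc, DownloadCancelled) for exc in (
        ExistingVideoReached, MaxDownloadsReached, RejectedVideoReached))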
@@ -28,6 +28,48 @@ def aes_gcm_decrypt_and_verify_bytes(data, key, tag, nonce):
BLOCK_SIZE_BYTES = 16


def aes_ecb_encrypt(data, key, iv=None):
    """
    Encrypt with aes in ECB mode

    @param {int[]} data        cleartext
    @param {int[]} key         16/24/32-Byte cipher key
    @param {int[]} iv          Unused for this mode
    @returns {int[]}           encrypted data
    """
    expanded_key = key_expansion(key)
    block_count = int(ceil(float(len(data)) / BLOCK_SIZE_BYTES))

    encrypted_data = []
    for i in range(block_count):
        block = data[i * BLOCK_SIZE_BYTES: (i + 1) * BLOCK_SIZE_BYTES]
        encrypted_data += aes_encrypt(block, expanded_key)
    encrypted_data = encrypted_data[:len(data)]

    return encrypted_data

def aes_ecb_decrypt(data, key, iv=None):
    """
    Decrypt with aes in ECB mode

    @param {int[]} data        ciphertext
    @param {int[]} key         16/24/32-Byte cipher key
    @param {int[]} iv          Unused for this mode
    @returns {int[]}           decrypted data
    """
    expanded_key = key_expansion(key)
    block_count = int(ceil(float(len(data)) / BLOCK_SIZE_BYTES))

    encrypted_data = []
    for i in range(block_count):
        block = data[i * BLOCK_SIZE_BYTES: (i + 1) * BLOCK_SIZE_BYTES]
        encrypted_data += aes_decrypt(block, expanded_key)
    encrypted_data = encrypted_data[:len(data)]

    return encrypted_data

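A round-trip sketch for the new ECB helpers, assuming they land in yt_dlp.aes as shown above. Data and key are lists of byte values, as elsewhere in this module; a 16-byte-aligned message is used because the helpers truncate rather than pad the final block:

    from yt_dlp.aes import aes_ecb_decrypt, aes_ecb_encrypt

    key = list(b'0123456789abcdef')    # 16-byte key as an int list
    data = list(b'sixteen byte msg')   # exactly one AES block
    assert aes_ecb_decrypt(aes_ecb_encrypt(data, key), key) == data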
def aes_ctr_decrypt(data, key, iv):
    """
    Decrypt with aes in counter mode

@@ -160,12 +160,20 @@ def compat_expanduser(path):
compat_pycrypto_AES = None


WINDOWS_VT_MODE = False if compat_os_name == 'nt' else None


def windows_enable_vt_mode():  # TODO: Do this the proper way https://bugs.python.org/issue30075
    if compat_os_name != 'nt':
        return
    global WINDOWS_VT_MODE
    startupinfo = subprocess.STARTUPINFO()
    startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
    subprocess.Popen('', shell=True, startupinfo=startupinfo)
    try:
        subprocess.Popen('', shell=True, startupinfo=startupinfo)
        WINDOWS_VT_MODE = True
    except Exception:
        pass

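Enabling VT mode is now best-effort: the Popen call is wrapped in try/except and the module-level flag records whether it succeeded. Callers should read the flag through the module so they see the mutated global (a sketch):

    from yt_dlp import compat

    compat.windows_enable_vt_mode()   # no-op outside Windows; never raises
    if compat.WINDOWS_VT_MODE:
        print('\033[32mANSI escape codes usable\033[0m')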
# Deprecated
@@ -226,6 +234,7 @@ def windows_enable_vt_mode():  # TODO: Do this the proper way https://bugs.pytho
# Set public objects

__all__ = [
    'WINDOWS_VT_MODE',
    'compat_HTMLParseError',
    'compat_HTMLParser',
    'compat_HTTPError',

@@ -12,10 +12,15 @@ def get_suitable_downloader(info_dict, params={}, default=NO_DEFAULT, protocol=N
    info_copy = info_dict.copy()
    info_copy['to_stdout'] = to_stdout

    downloaders = [_get_suitable_downloader(info_copy, proto, params, default)
                   for proto in (protocol or info_copy['protocol']).split('+')]
    protocols = (protocol or info_copy['protocol']).split('+')
    downloaders = [_get_suitable_downloader(info_copy, proto, params, default) for proto in protocols]

    if set(downloaders) == {FFmpegFD} and FFmpegFD.can_merge_formats(info_copy, params):
        return FFmpegFD
    elif (set(downloaders) == {DashSegmentsFD}
          and not (to_stdout and len(protocols) > 1)
          and set(protocols) == {'http_dash_segments_generator'}):
        return DashSegmentsFD
    elif len(downloaders) == 1:
        return downloaders[0]
    return None
@@ -41,6 +46,7 @@ def get_suitable_downloader(info_dict, params={}, default=NO_DEFAULT, protocol=N

PROTOCOL_MAP = {
    'rtmp': RtmpFD,
    'rtmpe': RtmpFD,
    'rtmp_ffmpeg': FFmpegFD,
    'm3u8_native': HlsFD,
    'm3u8': FFmpegFD,
@@ -48,6 +54,7 @@ def get_suitable_downloader(info_dict, params={}, default=NO_DEFAULT, protocol=N
    'rtsp': RtspFD,
    'f4m': F4mFD,
    'http_dash_segments': DashSegmentsFD,
    'http_dash_segments_generator': DashSegmentsFD,
    'ism': IsmFD,
    'mhtml': MhtmlFD,
    'niconico_dmc': NiconicoDmcFD,
@@ -62,6 +69,7 @@ def shorten_protocol_name(proto, simplify=False):
    'm3u8_native': 'm3u8_n',
    'rtmp_ffmpeg': 'rtmp_f',
    'http_dash_segments': 'dash',
    'http_dash_segments_generator': 'dash_g',
    'niconico_dmc': 'dmc',
    'websocket_frag': 'WSfrag',
}
@@ -70,6 +78,7 @@ def shorten_protocol_name(proto, simplify=False):
    'https': 'http',
    'ftps': 'ftp',
    'm3u8_native': 'm3u8',
    'http_dash_segments_generator': 'dash',
    'rtmp_ffmpeg': 'rtmp',
    'm3u8_frag_urls': 'm3u8',
    'dash_frag_urls': 'dash',

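The new generator protocol gets both a downloader mapping and display abbreviations. Expected behaviour of the (real) helper with the new entries:

    from yt_dlp.downloader import shorten_protocol_name

    shorten_protocol_name('http_dash_segments_generator')        # -> 'dash_g'
    shorten_protocol_name('http_dash_segments_generator', True)  # -> 'dash' (simplified)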
@@ -4,12 +4,14 @@
import re
import time
import random
import errno

from ..utils import (
    decodeArgument,
    encodeFilename,
    error_to_compat_str,
    format_bytes,
    sanitize_open,
    shell_quote,
    timeconvert,
    timetuple_from_msec,
@@ -39,6 +41,7 @@ class FileDownloader(object):
    ratelimit:          Download speed limit, in bytes/sec.
    throttledratelimit: Assume the download is being throttled below this speed (bytes/sec)
    retries:            Number of times to retry for HTTP error 5xx
    file_access_retries:   Number of times to retry on file access error
    buffersize:         Size of download buffer in bytes.
    noresizebuffer:     Do not automatically resize the download buffer.
    continuedl:         Try to continue downloads if possible.
@@ -93,6 +96,8 @@ def calc_percent(byte_counter, data_len):
    def format_percent(percent):
        if percent is None:
            return '---.-%'
        elif percent == 100:
            return '100%'
        return '%6s' % ('%3.1f%%' % percent)

    @staticmethod
@@ -205,6 +210,21 @@ def undo_temp_name(self, filename):
    def ytdl_filename(self, filename):
        return filename + '.ytdl'

    def sanitize_open(self, filename, open_mode):
        file_access_retries = self.params.get('file_access_retries', 10)
        retry = 0
        while True:
            try:
                return sanitize_open(filename, open_mode)
            except (IOError, OSError) as err:
                retry = retry + 1
                if retry > file_access_retries or err.errno not in (errno.EACCES,):
                    raise
                self.to_screen(
                    '[download] Got file access error. Retrying (attempt %d of %s) ...'
                    % (retry, self.format_retries(file_access_retries)))
                time.sleep(0.01)

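The wrapper retries only EACCES (for example a file transiently locked by another process on Windows) and re-raises anything else immediately. Behaviour sketch, stated as comments:

    # With params = {'file_access_retries': 3}:
    #   attempts 1..3 on EACCES -> message printed, 10 ms sleep, then retry
    #   attempt 4 on EACCES, or any other OSError -> the exception propagates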
    def try_rename(self, old_filename, new_filename):
        if old_filename == new_filename:
            return
@@ -247,11 +267,29 @@ def _prepare_multiline_status(self, lines=1):
            self._multiline = BreaklineStatusPrinter(self.ydl._screen_file, lines)
        else:
            self._multiline = MultilinePrinter(self.ydl._screen_file, lines, not self.params.get('quiet'))
        self._multiline.allow_colors = self._multiline._HAVE_FULLCAP and not self.params.get('no_color')

    def _finish_multiline_status(self):
        self._multiline.end()

    def _report_progress_status(self, s):
    _progress_styles = {
        'downloaded_bytes': 'light blue',
        'percent': 'light blue',
        'eta': 'yellow',
        'speed': 'green',
        'elapsed': 'bold white',
        'total_bytes': '',
        'total_bytes_estimate': '',
    }

    def _report_progress_status(self, s, default_template):
        for name, style in self._progress_styles.items():
            name = f'_{name}_str'
            if name not in s:
                continue
            s[name] = self._format_progress(s[name], style)
        s['_default_template'] = default_template % s

        progress_dict = s.copy()
        progress_dict.pop('info_dict')
        progress_dict = {'info': s['info_dict'], 'progress': progress_dict}
@@ -264,6 +302,10 @@ def _report_progress_status(self, s):
            progress_template.get('download-title') or 'yt-dlp %(progress._default_template)s',
            progress_dict))

    def _format_progress(self, *args, **kwargs):
        return self.ydl._format_text(
            self._multiline.stream, self._multiline.allow_colors, *args, **kwargs)

    def report_progress(self, s):
        if s['status'] == 'finished':
            if self.params.get('noprogress'):
@@ -276,8 +318,7 @@ def report_progress(self, s):
                s['_elapsed_str'] = self.format_seconds(s['elapsed'])
                msg_template += ' in %(_elapsed_str)s'
            s['_percent_str'] = self.format_percent(100)
            s['_default_template'] = msg_template % s
            self._report_progress_status(s)
            self._report_progress_status(s, msg_template)
            return

        if s['status'] != 'downloading':
@@ -286,7 +327,7 @@ def report_progress(self, s):
        if s.get('eta') is not None:
            s['_eta_str'] = self.format_eta(s['eta'])
        else:
            s['_eta_str'] = 'Unknown ETA'
            s['_eta_str'] = 'Unknown'

        if s.get('total_bytes') and s.get('downloaded_bytes') is not None:
            s['_percent_str'] = self.format_percent(100 * s['downloaded_bytes'] / s['total_bytes'])
@@ -318,13 +359,12 @@ def report_progress(self, s):
            else:
                msg_template = '%(_downloaded_bytes_str)s at %(_speed_str)s'
        else:
            msg_template = '%(_percent_str)s % at %(_speed_str)s ETA %(_eta_str)s'
            msg_template = '%(_percent_str)s at %(_speed_str)s ETA %(_eta_str)s'
        if s.get('fragment_index') and s.get('fragment_count'):
            msg_template += ' (frag %(fragment_index)s/%(fragment_count)s)'
        elif s.get('fragment_index'):
            msg_template += ' (frag %(fragment_index)s)'
        s['_default_template'] = msg_template % s
        self._report_progress_status(s)
        self._report_progress_status(s, msg_template)

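The styled fields feed the same dict that `--progress-template` consumes; the code above exposes it as `{'info': ..., 'progress': ...}` with `download` and `download-title` template targets. Example based on the documented form of that option:

    yt-dlp --progress-template 'download-title:%(info.id)s-%(progress.eta)s' URL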
    def report_resuming_byte(self, resume_len):
        """Report attempt to resume at given byte."""
@@ -375,6 +415,7 @@ def download(self, filename, info_dict, subtitle=False):
                'status': 'finished',
                'total_bytes': os.path.getsize(encodeFilename(filename)),
            }, info_dict)
            self._finish_multiline_status()
            return True, False

        if subtitle is False:

@@ -1,4 +1,5 @@
from __future__ import unicode_literals
import time

from ..downloader import get_suitable_downloader
from .fragment import FragmentFD
@@ -15,27 +16,53 @@ class DashSegmentsFD(FragmentFD):
    FD_NAME = 'dashsegments'

    def real_download(self, filename, info_dict):
        if info_dict.get('is_live'):
        if info_dict.get('is_live') and set(info_dict['protocol'].split('+')) != {'http_dash_segments_generator'}:
            self.report_error('Live DASH videos are not supported')

        fragment_base_url = info_dict.get('fragment_base_url')
        fragments = info_dict['fragments'][:1] if self.params.get(
            'test', False) else info_dict['fragments']

        real_start = time.time()
        real_downloader = get_suitable_downloader(
            info_dict, self.params, None, protocol='dash_frag_urls', to_stdout=(filename == '-'))

        ctx = {
            'filename': filename,
            'total_frags': len(fragments),
        }
        requested_formats = [{**info_dict, **fmt} for fmt in info_dict.get('requested_formats', [])]
        args = []
        for fmt in requested_formats or [info_dict]:
            try:
                fragment_count = 1 if self.params.get('test') else len(fmt['fragments'])
            except TypeError:
                fragment_count = None
            ctx = {
                'filename': fmt.get('filepath') or filename,
                'live': 'is_from_start' if fmt.get('is_from_start') else fmt.get('is_live'),
                'total_frags': fragment_count,
            }

        if real_downloader:
            self._prepare_external_frag_download(ctx)
        else:
            self._prepare_and_start_frag_download(ctx, info_dict)
            if real_downloader:
                self._prepare_external_frag_download(ctx)
            else:
                self._prepare_and_start_frag_download(ctx, fmt)
            ctx['start'] = real_start

            fragments_to_download = self._get_fragments(fmt, ctx)

            if real_downloader:
                self.to_screen(
                    '[%s] Fragment downloads will be delegated to %s' % (self.FD_NAME, real_downloader.get_basename()))
                info_dict['fragments'] = list(fragments_to_download)
                fd = real_downloader(self.ydl, self.params)
                return fd.real_download(filename, info_dict)

            args.append([ctx, fragments_to_download, fmt])

        return self.download_and_append_fragments_multiple(*args)

    def _resolve_fragments(self, fragments, ctx):
        fragments = fragments(ctx) if callable(fragments) else fragments
        return [next(iter(fragments))] if self.params.get('test') else fragments

    def _get_fragments(self, fmt, ctx):
        fragment_base_url = fmt.get('fragment_base_url')
        fragments = self._resolve_fragments(fmt['fragments'], ctx)

        fragments_to_download = []
        frag_index = 0
        for i, fragment in enumerate(fragments):
            frag_index += 1
@@ -46,17 +73,8 @@ def real_download(self, filename, info_dict):
                assert fragment_base_url
                fragment_url = urljoin(fragment_base_url, fragment['path'])

            fragments_to_download.append({
            yield {
                'frag_index': frag_index,
                'index': i,
                'url': fragment_url,
            })

        if real_downloader:
            self.to_screen(
                '[%s] Fragment downloads will be delegated to %s' % (self.FD_NAME, real_downloader.get_basename()))
            info_dict['fragments'] = fragments_to_download
            fd = real_downloader(self.ydl, self.params)
            return fd.real_download(filename, info_dict)

        return self.download_and_append_fragments(ctx, fragments_to_download, info_dict)
            }

@@ -22,7 +22,6 @@
    handle_youtubedl_headers,
    check_executable,
    Popen,
    sanitize_open,
)


@@ -144,11 +143,11 @@ def _call_downloader(self, tmpfilename, info_dict):
            return -1

        decrypt_fragment = self.decrypter(info_dict)
        dest, _ = sanitize_open(tmpfilename, 'wb')
        dest, _ = self.sanitize_open(tmpfilename, 'wb')
        for frag_index, fragment in enumerate(info_dict['fragments']):
            fragment_filename = '%s-Frag%d' % (tmpfilename, frag_index)
            try:
                src, _ = sanitize_open(fragment_filename, 'rb')
                src, _ = self.sanitize_open(fragment_filename, 'rb')
            except IOError as err:
                if skip_unavailable_fragments and frag_index > 1:
                    self.report_skip_fragment(frag_index, err)
@@ -290,7 +289,7 @@ def _make_cmd(self, tmpfilename, info_dict):
        for frag_index, fragment in enumerate(info_dict['fragments']):
            fragment_filename = '%s-Frag%d' % (os.path.basename(tmpfilename), frag_index)
            url_list.append('%s\n\tout=%s' % (fragment['url'], fragment_filename))
        stream, _ = sanitize_open(url_list_file, 'wb')
        stream, _ = self.sanitize_open(url_list_file, 'wb')
        stream.write('\n'.join(url_list).encode('utf-8'))
        stream.close()
        cmd += ['-i', url_list_file]
@@ -443,8 +442,7 @@ def _call_downloader(self, tmpfilename, info_dict):
        if info_dict.get('requested_formats') or protocol == 'http_dash_segments':
            for (i, fmt) in enumerate(info_dict.get('requested_formats') or [info_dict]):
                stream_number = fmt.get('manifest_stream_number', 0)
                a_or_v = 'a' if fmt.get('acodec') != 'none' else 'v'
                args.extend(['-map', f'{i}:{a_or_v}:{stream_number}'])
                args.extend(['-map', f'{i}:{stream_number}'])

        if self.params.get('test', False):
            args += ['-fs', compat_str(self._TEST_FILE_SIZE)]

@@ -366,7 +366,7 @@ def real_download(self, filename, info_dict):
        ctx = {
            'filename': filename,
            'total_frags': total_frags,
            'live': live,
            'live': bool(live),
        }

        self._prepare_frag_download(ctx)

@@ -1,9 +1,10 @@
from __future__ import division, unicode_literals

import http.client
import json
import math
import os
import time
import json
from math import ceil

try:
    import concurrent.futures
@@ -15,6 +16,7 @@
from .http import HttpFD
from ..aes import aes_cbc_decrypt_bytes
from ..compat import (
    compat_os_name,
    compat_urllib_error,
    compat_struct_pack,
)
@@ -22,7 +24,6 @@
    DownloadError,
    error_to_compat_str,
    encodeFilename,
    sanitize_open,
    sanitized_Request,
)

@@ -90,11 +91,11 @@ def _prepare_and_start_frag_download(self, ctx, info_dict):
        self._start_frag_download(ctx, info_dict)

    def __do_ytdl_file(self, ctx):
        return not ctx['live'] and not ctx['tmpfilename'] == '-' and not self.params.get('_no_ytdl_file')
        return ctx['live'] is not True and ctx['tmpfilename'] != '-' and not self.params.get('_no_ytdl_file')

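`ctx['live']` can now be True, False/None, or the string 'is_from_start'; only a literal True disables the .ytdl resume file, so live-from-start downloads keep resume state. Quick truth table:

    for live in (True, False, None, 'is_from_start'):
        print(live, live is not True)
    # True -> False (no .ytdl file); all other values -> True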
    def _read_ytdl_file(self, ctx):
        assert 'ytdl_corrupt' not in ctx
        stream, _ = sanitize_open(self.ytdl_filename(ctx['filename']), 'r')
        stream, _ = self.sanitize_open(self.ytdl_filename(ctx['filename']), 'r')
        try:
            ytdl_data = json.loads(stream.read())
            ctx['fragment_index'] = ytdl_data['downloader']['current_fragment']['index']
@@ -106,7 +107,7 @@ def _read_ytdl_file(self, ctx):
            stream.close()

    def _write_ytdl_file(self, ctx):
        frag_index_stream, _ = sanitize_open(self.ytdl_filename(ctx['filename']), 'w')
        frag_index_stream, _ = self.sanitize_open(self.ytdl_filename(ctx['filename']), 'w')
        try:
            downloader = {
                'current_fragment': {
@@ -138,7 +139,7 @@ def _download_fragment(self, ctx, frag_url, info_dict, headers=None, request_dat
        return True, self._read_fragment(ctx)

    def _read_fragment(self, ctx):
        down, frag_sanitized = sanitize_open(ctx['fragment_filename_sanitized'], 'rb')
        down, frag_sanitized = self.sanitize_open(ctx['fragment_filename_sanitized'], 'rb')
        ctx['fragment_filename_sanitized'] = frag_sanitized
        frag_content = down.read()
        down.close()
@@ -214,7 +215,7 @@ def _prepare_frag_download(self, ctx):
            self._write_ytdl_file(ctx)
            assert ctx['fragment_index'] == 0

        dest_stream, tmpfilename = sanitize_open(tmpfilename, open_mode)
        dest_stream, tmpfilename = self.sanitize_open(tmpfilename, open_mode)

        ctx.update({
            'dl': dl,
@@ -375,17 +376,20 @@ def download_and_append_fragments_multiple(self, *args, pack_func=None, finish_f
        @params (ctx1, fragments1, info_dict1), (ctx2, fragments2, info_dict2), ...
        all args must be either tuple or list
        '''
        interrupt_trigger = [True]
        max_progress = len(args)
        if max_progress == 1:
            return self.download_and_append_fragments(*args[0], pack_func=pack_func, finish_func=finish_func)
        max_workers = self.params.get('concurrent_fragment_downloads', max_progress)
        max_workers = self.params.get('concurrent_fragment_downloads', 1)
        if max_progress > 1:
            self._prepare_multiline_status(max_progress)

        def thread_func(idx, ctx, fragments, info_dict, tpe):
            ctx['max_progress'] = max_progress
            ctx['progress_idx'] = idx
            return self.download_and_append_fragments(ctx, fragments, info_dict, pack_func=pack_func, finish_func=finish_func, tpe=tpe)
            return self.download_and_append_fragments(
                ctx, fragments, info_dict, pack_func=pack_func, finish_func=finish_func,
                tpe=tpe, interrupt_trigger=interrupt_trigger)

        class FTPE(concurrent.futures.ThreadPoolExecutor):
            # has to stop this or it's going to wait on the worker thread itself
@@ -393,8 +397,11 @@ def __exit__(self, exc_type, exc_val, exc_tb):
                pass

        spins = []
        if compat_os_name == 'nt':
            self.report_warning('Ctrl+C does not work on Windows when used with parallel threads. '
                                'This is a known issue and patches are welcome')
        for idx, (ctx, fragments, info_dict) in enumerate(args):
            tpe = FTPE(ceil(max_workers / max_progress))
            tpe = FTPE(math.ceil(max_workers / max_progress))
            job = tpe.submit(thread_func, idx, ctx, fragments, info_dict, tpe)
            spins.append((tpe, job))

@@ -402,18 +409,32 @@ def __exit__(self, exc_type, exc_val, exc_tb):
        for tpe, job in spins:
            try:
                result = result and job.result()
            except KeyboardInterrupt:
                interrupt_trigger[0] = False
            finally:
                tpe.shutdown(wait=True)
        if not interrupt_trigger[0]:
            raise KeyboardInterrupt()
        return result

    def download_and_append_fragments(self, ctx, fragments, info_dict, *, pack_func=None, finish_func=None, tpe=None):
    def download_and_append_fragments(
            self, ctx, fragments, info_dict, *, pack_func=None, finish_func=None,
            tpe=None, interrupt_trigger=None):
        if not interrupt_trigger:
            interrupt_trigger = (True, )

        fragment_retries = self.params.get('fragment_retries', 0)
        is_fatal = (lambda idx: idx == 0) if self.params.get('skip_unavailable_fragments', True) else (lambda _: True)
        is_fatal = (
            ((lambda _: False) if info_dict.get('is_live') else (lambda idx: idx == 0))
            if self.params.get('skip_unavailable_fragments', True) else (lambda _: True))

        if not pack_func:
            pack_func = lambda frag_content, _: frag_content

        def download_fragment(fragment, ctx):
            frag_index = ctx['fragment_index'] = fragment['frag_index']
            if not interrupt_trigger[0]:
                return False, frag_index
            headers = info_dict.get('http_headers', {}).copy()
            byte_range = fragment.get('byte_range')
            if byte_range:
@@ -428,7 +449,7 @@ def download_fragment(fragment, ctx):
                    if not success:
                        return False, frag_index
                    break
                except compat_urllib_error.HTTPError as err:
                except (compat_urllib_error.HTTPError, http.client.IncompleteRead) as err:
                    # Unavailable (possibly temporary) fragments may be served.
                    # First we try to retry then either skip or abort.
                    # See https://github.com/ytdl-org/youtube-dl/issues/10165,
@@ -466,7 +487,8 @@ def append_fragment(frag_content, frag_index, ctx):

        decrypt_fragment = self.decrypter(info_dict)

        max_workers = self.params.get('concurrent_fragment_downloads', 1)
        max_workers = math.ceil(
            self.params.get('concurrent_fragment_downloads', 1) / ctx.get('max_progress', 1))
        if can_threaded_download and max_workers > 1:

            def _download_fragment(fragment):
@@ -477,6 +499,8 @@ def _download_fragment(fragment):
            self.report_warning('The download speed shown is only of one thread. This is a known issue and patches are welcome')
            with tpe or concurrent.futures.ThreadPoolExecutor(max_workers) as pool:
                for fragment, frag_content, frag_index, frag_filename in pool.map(_download_fragment, fragments):
                    if not interrupt_trigger[0]:
                        break
                    ctx['fragment_filename_sanitized'] = frag_filename
                    ctx['fragment_index'] = frag_index
                    result = append_fragment(decrypt_fragment(fragment, frag_content), frag_index, ctx)
@@ -484,6 +508,8 @@ def _download_fragment(fragment):
                return False
        else:
            for fragment in fragments:
                if not interrupt_trigger[0]:
                    break
                frag_content, frag_index = download_fragment(fragment, ctx)
                result = append_fragment(decrypt_fragment(fragment, frag_content), frag_index, ctx)
                if not result:

@@ -77,6 +77,15 @@ def real_download(self, filename, info_dict):
            message = ('The stream has AES-128 encryption and neither ffmpeg nor pycryptodomex are available; '
                       'Decryption will be performed natively, but will be extremely slow')
        if not can_download:
            has_drm = re.search('|'.join([
                r'#EXT-X-FAXS-CM:',  # Adobe Flash Access
                r'#EXT-X-(?:SESSION-)?KEY:.*?URI="skd://',  # Apple FairPlay
            ]), s)
            if has_drm and not self.params.get('allow_unplayable_formats'):
                self.report_error(
                    'This video is DRM protected; Try selecting another format with --format or '
                    'add --check-formats to automatically fallback to the next best format')
                return False
            message = message or 'Unsupported features have been detected'
            fd = FFmpegFD(self.ydl, self.params)
            self.report_warning(f'{message}; extraction will be delegated to {fd.get_basename()}')
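The DRM check is a plain regex scan over the manifest text. A self-contained demonstration against a FairPlay-style key line:

    import re

    manifest = '#EXT-X-SESSION-KEY:METHOD=SAMPLE-AES,URI="skd://example-asset"'
    has_drm = re.search('|'.join([
        r'#EXT-X-FAXS-CM:',                         # Adobe Flash Access
        r'#EXT-X-(?:SESSION-)?KEY:.*?URI="skd://',  # Apple FairPlay
    ]), manifest)
    print(bool(has_drm))  # True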
@@ -16,7 +16,6 @@
    ContentTooShortError,
    encodeFilename,
    int_or_none,
    sanitize_open,
    sanitized_Request,
    ThrottledDownload,
    write_xattr,
@@ -263,7 +262,7 @@ def retry(e):
            # Open destination file just in time
            if ctx.stream is None:
                try:
                    ctx.stream, ctx.tmpfilename = sanitize_open(
                    ctx.stream, ctx.tmpfilename = self.sanitize_open(
                        ctx.tmpfilename, ctx.open_mode)
                    assert ctx.stream is not None
                    ctx.filename = self.undo_temp_name(ctx.tmpfilename)

@@ -114,8 +114,8 @@ def real_download(self, filename, info_dict):
        fragment_base_url = info_dict.get('fragment_base_url')
        fragments = info_dict['fragments'][:1] if self.params.get(
            'test', False) else info_dict['fragments']
        title = info_dict['title']
        origin = info_dict['webpage_url']
        title = info_dict.get('title', info_dict['format_id'])
        origin = info_dict.get('webpage_url', info_dict['url'])

        ctx = {
            'filename': filename,

@@ -8,6 +8,7 @@
from .common import InfoExtractor
from ..compat import compat_str
from ..utils import (
    dict_get,
    ExtractorError,
    js_to_json,
    int_or_none,
@@ -233,8 +234,6 @@ def tokenize_url(url, token):
        }]

        is_live = video_params.get('livestream') == '1'
        if is_live:
            title = self._live_title(title)

        return {
            'id': video_id,
@@ -255,3 +254,66 @@ def tokenize_url(url, token):
            'subtitles': subtitles,
            'is_live': is_live,
        }


class ABCIViewShowSeriesIE(InfoExtractor):
    IE_NAME = 'abc.net.au:iview:showseries'
    _VALID_URL = r'https?://iview\.abc\.net\.au/show/(?P<id>[^/]+)(?:/series/\d+)?$'
    _GEO_COUNTRIES = ['AU']

    _TESTS = [{
        'url': 'https://iview.abc.net.au/show/upper-middle-bogan',
        'info_dict': {
            'id': '124870-1',
            'title': 'Series 1',
            'description': 'md5:93119346c24a7c322d446d8eece430ff',
            'series': 'Upper Middle Bogan',
            'season': 'Series 1',
            'thumbnail': r're:^https?://cdn\.iview\.abc\.net\.au/thumbs/.*\.jpg$'
        },
        'playlist_count': 8,
    }, {
        'url': 'https://iview.abc.net.au/show/upper-middle-bogan',
        'info_dict': {
            'id': 'CO1108V001S00',
            'ext': 'mp4',
            'title': 'Series 1 Ep 1 I\'m A Swan',
            'description': 'md5:7b676758c1de11a30b79b4d301e8da93',
            'series': 'Upper Middle Bogan',
            'uploader_id': 'abc1',
            'upload_date': '20210630',
            'timestamp': 1625036400,
        },
        'params': {
            'noplaylist': True,
            'skip_download': 'm3u8',
        },
    }]

    def _real_extract(self, url):
        show_id = self._match_id(url)
        webpage = self._download_webpage(url, show_id)
        webpage_data = self._search_regex(
            r'window\.__INITIAL_STATE__\s*=\s*[\'"](.+?)[\'"]\s*;',
            webpage, 'initial state')
        video_data = self._parse_json(
            unescapeHTML(webpage_data).encode('utf-8').decode('unicode_escape'), show_id)
        video_data = video_data['route']['pageData']['_embedded']

        if self.get_param('noplaylist') and 'highlightVideo' in video_data:
            self.to_screen('Downloading just the highlight video because of --no-playlist')
            return self.url_result(video_data['highlightVideo']['shareUrl'], ie=ABCIViewIE.ie_key())

        self.to_screen(f'Downloading playlist {show_id} - add --no-playlist to just download the highlight video')
        series = video_data['selectedSeries']
        return {
            '_type': 'playlist',
            'entries': [self.url_result(episode['shareUrl'])
                        for episode in series['_embedded']['videoEpisodes']],
            'id': series.get('id'),
            'title': dict_get(series, ('title', 'displaySubtitle')),
            'description': series.get('description'),
            'series': dict_get(series, ('showTitle', 'displayTitle')),
            'season': dict_get(series, ('title', 'displaySubtitle')),
            'thumbnail': series.get('thumbnail'),
        }
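Usage of the new show-series extractor from the command line (real yt-dlp flags; the URL is the one from the tests above):

    # whole series as a playlist
    yt-dlp 'https://iview.abc.net.au/show/upper-middle-bogan'
    # only the show page's highlight video
    yt-dlp --no-playlist 'https://iview.abc.net.au/show/upper-middle-bogan'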
@@ -31,7 +31,7 @@ def _real_extract(self, url):

        return {
            'id': video_id,
            'title': self._live_title(title) if is_live else title,
            'title': title,
            'formats': formats,
            'is_live': is_live,
        }

@@ -1,55 +1,86 @@
# coding: utf-8
from __future__ import unicode_literals

import json

from .common import InfoExtractor
from ..utils import (
    try_get,
)


class AlJazeeraIE(InfoExtractor):
    _VALID_URL = r'https?://(?:www\.)?aljazeera\.com/(?P<type>program/[^/]+|(?:feature|video)s)/\d{4}/\d{1,2}/\d{1,2}/(?P<id>[^/?&#]+)'
    _VALID_URL = r'https?://(?P<base>\w+\.aljazeera\.\w+)/(?P<type>programs?/[^/]+|(?:feature|video|new)s)?/\d{4}/\d{1,2}/\d{1,2}/(?P<id>[^/?&#]+)'

    _TESTS = [{
        'url': 'https://www.aljazeera.com/program/episode/2014/9/19/deliverance',
        'url': 'https://balkans.aljazeera.net/videos/2021/11/6/pojedini-domovi-u-sarajevu-jos-pod-vodom-mjestanima-se-dostavlja-hrana',
        'info_dict': {
            'id': '3792260579001',
            'id': '6280641530001',
            'ext': 'mp4',
            'title': 'The Slum - Episode 1: Deliverance',
            'description': 'As a birth attendant advocating for family planning, Remy is on the frontline of Tondo\'s battle with overcrowding.',
            'uploader_id': '665003303001',
            'timestamp': 1411116829,
            'upload_date': '20140919',
            'title': 'Pojedini domovi u Sarajevu još pod vodom, mještanima se dostavlja hrana',
            'timestamp': 1636219149,
            'description': 'U sarajevskim naseljima Rajlovac i Reljevo stambeni objekti, ali i industrijska postrojenja i dalje su pod vodom.',
            'upload_date': '20211106',
        }
    }, {
        'url': 'https://balkans.aljazeera.net/videos/2021/11/6/djokovic-usao-u-finale-mastersa-u-parizu',
        'info_dict': {
            'id': '6280654936001',
            'ext': 'mp4',
            'title': 'Đoković ušao u finale Mastersa u Parizu',
            'timestamp': 1636221686,
            'description': 'Novak Đoković je u polufinalu Mastersa u Parizu nakon preokreta pobijedio Poljaka Huberta Hurkacza.',
            'upload_date': '20211106',
        },
        'add_ie': ['BrightcoveNew'],
        'skip': 'Not accessible from Travis CI server',
    }, {
        'url': 'https://www.aljazeera.com/videos/2017/5/11/sierra-leone-709-carat-diamond-to-be-auctioned-off',
        'only_matching': True,
    }, {
        'url': 'https://www.aljazeera.com/features/2017/8/21/transforming-pakistans-buses-into-art',
        'only_matching': True,
    }]
    BRIGHTCOVE_URL_TEMPLATE = 'http://players.brightcove.net/%s/%s_default/index.html?videoId=%s'
    BRIGHTCOVE_URL_RE = r'https?://players.brightcove.net/(?P<account>\d+)/(?P<player_id>[a-zA-Z0-9]+)_(?P<embed>[^/]+)/index.html\?videoId=(?P<id>\d+)'

    def _real_extract(self, url):
        post_type, name = self._match_valid_url(url).groups()
        base, post_type, id = self._match_valid_url(url).groups()
        wp = {
            'balkans.aljazeera.net': 'ajb',
            'chinese.aljazeera.net': 'chinese',
            'mubasher.aljazeera.net': 'ajm',
        }.get(base) or 'aje'
        post_type = {
            'features': 'post',
            'program': 'episode',
            'programs': 'episode',
            'videos': 'video',
            'news': 'news',
        }[post_type.split('/')[0]]
        video = self._download_json(
            'https://www.aljazeera.com/graphql', name, query={
            f'https://{base}/graphql', id, query={
                'wp-site': wp,
                'operationName': 'ArchipelagoSingleArticleQuery',
                'variables': json.dumps({
                    'name': name,
                    'name': id,
                    'postType': post_type,
                }),
            }, headers={
                'wp-site': 'aje',
            })['data']['article']['video']
        video_id = video['id']
        account_id = video.get('accountId') or '665003303001'
        player_id = video.get('playerId') or 'BkeSH5BDb'
        return self.url_result(
            self.BRIGHTCOVE_URL_TEMPLATE % (account_id, player_id, video_id),
            'BrightcoveNew', video_id)
                'wp-site': wp,
            })
        video = try_get(video, lambda x: x['data']['article']['video']) or {}
        video_id = video.get('id')
        account = video.get('accountId') or '911432371001'
        player_id = video.get('playerId') or 'csvTfAlKW'
        embed = 'default'

        if video_id is None:
            webpage = self._download_webpage(url, id)

            account, player_id, embed, video_id = self._search_regex(self.BRIGHTCOVE_URL_RE, webpage, 'video id',
                                                                     group=(1, 2, 3, 4), default=(None, None, None, None))

            if video_id is None:
                return {
                    '_type': 'url_transparent',
                    'url': url,
                    'ie_key': 'Generic'
                }

        return {
            '_type': 'url_transparent',
            'url': f'https://players.brightcove.net/{account}/{player_id}_{embed}/index.html?videoId={video_id}',
            'ie_key': 'BrightcoveNew'
        }

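Sketch of the GraphQL request the rewritten extractor issues (everything here is read off the code above; the wp-site key is picked from the subdomain and defaults to 'aje'):

    import json

    base, id, post_type = 'balkans.aljazeera.net', 'djokovic-usao-u-finale-mastersa-u-parizu', 'video'
    wp = {'balkans.aljazeera.net': 'ajb', 'chinese.aljazeera.net': 'chinese',
          'mubasher.aljazeera.net': 'ajm'}.get(base) or 'aje'
    # GET https://{base}/graphql with headers={'wp-site': wp} and this query string:
    query = {
        'wp-site': wp,
        'operationName': 'ArchipelagoSingleArticleQuery',
        'variables': json.dumps({'name': id, 'postType': post_type}),
    }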
@@ -4,7 +4,7 @@


class AmazonStoreIE(InfoExtractor):
    _VALID_URL = r'(?:https?://)(?:www\.)?amazon\.(?:[a-z]{2,3})(?:\.[a-z]{2})?/[^/]*/?(?:dp|gp/product)/(?P<id>[^/&#$?]+)'
    _VALID_URL = r'https?://(?:www\.)?amazon\.(?:[a-z]{2,3})(?:\.[a-z]{2})?/(?:[^/]+/)?(?:dp|gp/product)/(?P<id>[^/&#$?]+)'

    _TESTS = [{
        'url': 'https://www.amazon.co.uk/dp/B098XNCHLD/',

@@ -3,33 +3,36 @@

import re
import json

from .common import InfoExtractor
from .youtube import YoutubeIE
from .youtube import YoutubeIE, YoutubeBaseInfoExtractor
from ..compat import (
    compat_urllib_parse_unquote,
    compat_urllib_parse_unquote_plus,
    compat_HTTPError
)
from ..utils import (
    bug_reports_message,
    clean_html,
    determine_ext,
    dict_get,
    extract_attributes,
    ExtractorError,
    get_element_by_id,
    HEADRequest,
    int_or_none,
    KNOWN_EXTENSIONS,
    merge_dicts,
    mimetype2ext,
    orderedSet,
    parse_duration,
    parse_qs,
    RegexNotFoundError,
    str_to_int,
    str_or_none,
    traverse_obj,
    try_get,
    unified_strdate,
    unified_timestamp,
    urlhandle_detect_ext,
    url_or_none
)


@@ -262,12 +265,12 @@ class YoutubeWebArchiveIE(InfoExtractor):
    _VALID_URL = r"""(?x)^
                (?:https?://)?web\.archive\.org/
                    (?:web/)?
                    (?:[0-9A-Za-z_*]+/)?  # /web and the version index is optional
                    (?:(?P<date>[0-9]{14})?[0-9A-Za-z_*]*/)?  # /web and the version index is optional

                (?:https?(?::|%3[Aa])//)?
                (?:
                    (?:\w+\.)?youtube\.com/watch(?:\?|%3[fF])(?:[^\#]+(?:&|%26))?v(?:=|%3[dD])  # Youtube URL
                    |(wayback-fakeurl\.archive\.org/yt/)  # Or the internal fake url
                    (?:\w+\.)?youtube\.com(?::(?:80|443))?/watch(?:\.php)?(?:\?|%3[fF])(?:[^\#]+(?:&|%26))?v(?:=|%3[dD])  # Youtube URL
                    |(?:wayback-fakeurl\.archive\.org/yt/)  # Or the internal fake url
                )
                (?P<id>[0-9A-Za-z_-]{11})(?:%26|\#|&|$)
                """
@@ -278,141 +281,391 @@ class YoutubeWebArchiveIE(InfoExtractor):
        'info_dict': {
            'id': 'aYAGB11YrSs',
            'ext': 'webm',
            'title': 'Team Fortress 2 - Sandviches!'
            'title': 'Team Fortress 2 - Sandviches!',
            'description': 'md5:4984c0f9a07f349fc5d8e82ab7af4eaf',
            'upload_date': '20110926',
            'uploader': 'Zeurel',
            'channel_id': 'UCukCyHaD-bK3in_pKpfH9Eg',
            'duration': 32,
            'uploader_id': 'Zeurel',
            'uploader_url': 'http://www.youtube.com/user/Zeurel'
        }
    },
    {
    }, {
        # Internal link
        'url': 'https://web.archive.org/web/2oe/http://wayback-fakeurl.archive.org/yt/97t7Xj_iBv0',
        'info_dict': {
            'id': '97t7Xj_iBv0',
            'ext': 'mp4',
            'title': 'How Flexible Machines Could Save The World'
            'title': 'Why Machines That Bend Are Better',
            'description': 'md5:00404df2c632d16a674ff8df1ecfbb6c',
            'upload_date': '20190312',
            'uploader': 'Veritasium',
            'channel_id': 'UCHnyfMqiRRG1u-2MsSQLbXA',
            'duration': 771,
            'uploader_id': '1veritasium',
            'uploader_url': 'http://www.youtube.com/user/1veritasium'
        }
    },
    {
        # Video from 2012, webm format itag 45.
    }, {
        # Video from 2012, webm format itag 45. Newest capture is deleted video, with an invalid description.
        # Should use the date in the link. Title ends with '- Youtube'. Capture has description in eow-description
        'url': 'https://web.archive.org/web/20120712231619/http://www.youtube.com/watch?v=AkhihxRKcrs&gl=US&hl=en',
        'info_dict': {
            'id': 'AkhihxRKcrs',
            'ext': 'webm',
            'title': 'Limited Run: Mondo\'s Modern Classic 1 of 3 (SDCC 2012)'
            'title': 'Limited Run: Mondo\'s Modern Classic 1 of 3 (SDCC 2012)',
            'upload_date': '20120712',
            'duration': 398,
            'description': 'md5:ff4de6a7980cb65d951c2f6966a4f2f3',
            'uploader_id': 'machinima',
            'uploader_url': 'http://www.youtube.com/user/machinima'
        }
    },
    {
        # Old flash-only video. Webpage title starts with "YouTube - ".
    }, {
        # FLV video. Video file URL does not provide itag information
        'url': 'https://web.archive.org/web/20081211103536/http://www.youtube.com/watch?v=jNQXAC9IVRw',
        'info_dict': {
            'id': 'jNQXAC9IVRw',
            'ext': 'unknown_video',
            'title': 'Me at the zoo'
            'ext': 'flv',
            'title': 'Me at the zoo',
            'upload_date': '20050423',
            'channel_id': 'UC4QobU6STFB0P71PMvOGN5A',
            'duration': 19,
            'description': 'md5:10436b12e07ac43ff8df65287a56efb4',
            'uploader_id': 'jawed',
            'uploader_url': 'http://www.youtube.com/user/jawed'
        }
    },
    {
        # Flash video with .flv extension (itag 34). Title has prefix "YouTube -"
        # Title has some weird unicode characters too.
    }, {
        'url': 'https://web.archive.org/web/20110712231407/http://www.youtube.com/watch?v=lTx3G6h2xyA',
        'info_dict': {
            'id': 'lTx3G6h2xyA',
            'ext': 'flv',
            'title': 'Madeon - Pop Culture (live mashup)'
            'title': 'Madeon - Pop Culture (live mashup)',
            'upload_date': '20110711',
            'uploader': 'Madeon',
            'channel_id': 'UCqMDNf3Pn5L7pcNkuSEeO3w',
            'duration': 204,
            'description': 'md5:f7535343b6eda34a314eff8b85444680',
            'uploader_id': 'itsmadeon',
            'uploader_url': 'http://www.youtube.com/user/itsmadeon'
        }
    },
    {  # Some versions of Youtube have "YouTube" as page title in html (and later rewritten by js).
}, {
|
||||
# First capture is of dead video, second is the oldest from CDX response.
|
||||
'url': 'https://web.archive.org/https://www.youtube.com/watch?v=1JYutPM8O6E',
|
||||
'info_dict': {
|
||||
'id': '1JYutPM8O6E',
|
||||
'ext': 'mp4',
|
||||
'title': 'Fake Teen Doctor Strikes AGAIN! - Weekly Weird News',
|
||||
'upload_date': '20160218',
|
||||
'channel_id': 'UCdIaNUarhzLSXGoItz7BHVA',
|
||||
'duration': 1236,
|
||||
'description': 'md5:21032bae736421e89c2edf36d1936947',
|
||||
'uploader_id': 'MachinimaETC',
|
||||
'uploader_url': 'http://www.youtube.com/user/MachinimaETC'
|
||||
}
|
||||
}, {
|
||||
# First capture of dead video, capture date in link links to dead capture.
|
||||
'url': 'https://web.archive.org/web/20180803221945/https://www.youtube.com/watch?v=6FPhZJGvf4E',
|
||||
'info_dict': {
|
||||
'id': '6FPhZJGvf4E',
|
||||
'ext': 'mp4',
|
||||
'title': 'WTF: Video Games Still Launch BROKEN?! - T.U.G.S.',
|
||||
'upload_date': '20160219',
|
||||
'channel_id': 'UCdIaNUarhzLSXGoItz7BHVA',
|
||||
'duration': 798,
|
||||
'description': 'md5:a1dbf12d9a3bd7cb4c5e33b27d77ffe7',
|
||||
'uploader_id': 'MachinimaETC',
|
||||
'uploader_url': 'http://www.youtube.com/user/MachinimaETC'
|
||||
},
|
||||
'expected_warnings': [
|
||||
r'unable to download capture webpage \(it may not be archived\)'
|
||||
]
|
||||
}, { # Very old YouTube page, has - YouTube in title.
|
||||
'url': 'http://web.archive.org/web/20070302011044/http://youtube.com/watch?v=-06-KB9XTzg',
|
||||
'info_dict': {
|
||||
'id': '-06-KB9XTzg',
|
||||
'ext': 'flv',
|
||||
'title': 'New Coin Hack!! 100% Safe!!'
|
||||
}
|
||||
}, {
|
||||
'url': 'web.archive.org/https://www.youtube.com/watch?v=dWW7qP423y8',
|
||||
'info_dict': {
|
||||
'id': 'dWW7qP423y8',
|
||||
'ext': 'mp4',
|
||||
'title': 'It\'s Bootleg AirPods Time.',
|
||||
'upload_date': '20211021',
|
||||
'channel_id': 'UC7Jwj9fkrf1adN4fMmTkpug',
|
||||
'channel_url': 'http://www.youtube.com/channel/UC7Jwj9fkrf1adN4fMmTkpug',
|
||||
'duration': 810,
|
||||
'description': 'md5:7b567f898d8237b256f36c1a07d6d7bc',
|
||||
'uploader': 'DankPods',
|
||||
'uploader_id': 'UC7Jwj9fkrf1adN4fMmTkpug',
|
||||
'uploader_url': 'http://www.youtube.com/channel/UC7Jwj9fkrf1adN4fMmTkpug'
|
||||
}
|
||||
}, {
|
||||
# player response contains '};' See: https://github.com/ytdl-org/youtube-dl/issues/27093
|
||||
'url': 'https://web.archive.org/web/20200827003909if_/http://www.youtube.com/watch?v=6Dh-RL__uN4',
|
||||
'info_dict': {
|
||||
'id': '6Dh-RL__uN4',
|
||||
'ext': 'mp4',
|
||||
'title': 'bitch lasagna',
|
||||
'upload_date': '20181005',
|
||||
'channel_id': 'UC-lHJZR3Gqxm24_Vd_AJ5Yw',
|
||||
'channel_url': 'http://www.youtube.com/channel/UC-lHJZR3Gqxm24_Vd_AJ5Yw',
|
||||
'duration': 135,
|
||||
'description': 'md5:2dbe4051feeff2dab5f41f82bb6d11d0',
|
||||
'uploader': 'PewDiePie',
|
||||
'uploader_id': 'PewDiePie',
|
||||
'uploader_url': 'http://www.youtube.com/user/PewDiePie'
|
||||
}
|
||||
}, {
|
||||
'url': 'https://web.archive.org/web/http://www.youtube.com/watch?v=kH-G_aIBlFw',
|
||||
'info_dict': {
|
||||
'id': 'kH-G_aIBlFw',
|
||||
'ext': 'mp4',
|
||||
'title': 'kH-G_aIBlFw'
|
||||
},
|
||||
'expected_warnings': [
|
||||
'unable to extract title',
|
||||
]
|
||||
},
|
||||
{
|
||||
# First capture is a 302 redirect intermediary page.
|
||||
'url': 'https://web.archive.org/web/20050214000000/http://www.youtube.com/watch?v=0altSZ96U4M',
|
||||
'info_dict': {
|
||||
'id': '0altSZ96U4M',
|
||||
'ext': 'mp4',
|
||||
'title': '0altSZ96U4M'
|
||||
},
|
||||
'expected_warnings': [
|
||||
'unable to extract title',
|
||||
]
|
||||
},
|
||||
{
|
||||
'only_matching': True
|
||||
}, {
|
||||
'url': 'https://web.archive.org/web/20050214000000_if/http://www.youtube.com/watch?v=0altSZ96U4M',
|
||||
'only_matching': True
|
||||
}, {
|
||||
# Video not archived, only capture is unavailable video page
|
||||
'url': 'https://web.archive.org/web/20210530071008/https://www.youtube.com/watch?v=lHJTf93HL1s&spfreload=10',
|
||||
'only_matching': True,
|
||||
},
|
||||
{ # Encoded url
|
||||
'only_matching': True
|
||||
}, { # Encoded url
|
||||
'url': 'https://web.archive.org/web/20120712231619/http%3A//www.youtube.com/watch%3Fgl%3DUS%26v%3DAkhihxRKcrs%26hl%3Den',
|
||||
'only_matching': True,
|
||||
},
|
||||
{
|
||||
'only_matching': True
|
||||
}, {
|
||||
'url': 'https://web.archive.org/web/20120712231619/http%3A//www.youtube.com/watch%3Fv%3DAkhihxRKcrs%26gl%3DUS%26hl%3Den',
|
||||
'only_matching': True,
|
||||
'only_matching': True
|
||||
}, {
|
||||
'url': 'https://web.archive.org/web/20060527081937/http://www.youtube.com:80/watch.php?v=ELTFsLT73fA&search=soccer',
|
||||
'only_matching': True
|
||||
}, {
|
||||
'url': 'https://web.archive.org/http://www.youtube.com:80/watch?v=-05VVye-ffg',
|
||||
'only_matching': True
|
||||
}
|
||||
]

    _YT_INITIAL_DATA_RE = r'(?:(?:(?:window\s*\[\s*["\']ytInitialData["\']\s*\]|ytInitialData)\s*=\s*({.+?})\s*;)|%s)' % YoutubeBaseInfoExtractor._YT_INITIAL_DATA_RE
    _YT_INITIAL_PLAYER_RESPONSE_RE = r'(?:(?:(?:window\s*\[\s*["\']ytInitialPlayerResponse["\']\s*\]|ytInitialPlayerResponse)\s*=[(\s]*({.+?})[)\s]*;)|%s)' % YoutubeBaseInfoExtractor._YT_INITIAL_PLAYER_RESPONSE_RE
    _YT_INITIAL_BOUNDARY_RE = r'(?:(?:var\s+meta|</script|\n)|%s)' % YoutubeBaseInfoExtractor._YT_INITIAL_BOUNDARY_RE

    _YT_DEFAULT_THUMB_SERVERS = ['i.ytimg.com']  # thumbnails most likely archived on these servers
    _YT_ALL_THUMB_SERVERS = orderedSet(
        _YT_DEFAULT_THUMB_SERVERS + ['img.youtube.com', *[f'{c}{n or ""}.ytimg.com' for c in ('i', 's') for n in (*range(0, 5), 9)]])
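A standalone sketch (not upstream code) of what the comprehension above expands to; `ordered_set` here is a stand-in for yt-dlp's `orderedSet` helper:

# Sketch: expansion of the thumbnail-server comprehension above.
def ordered_set(seq):
    # dedupe while preserving first occurrence, like yt_dlp.utils.orderedSet
    seen = set()
    return [x for x in seq if not (x in seen or seen.add(x))]

servers = ordered_set(
    ['i.ytimg.com', 'img.youtube.com']
    + [f'{c}{n or ""}.ytimg.com' for c in ('i', 's') for n in (*range(0, 5), 9)])
print(servers)
# ['i.ytimg.com', 'img.youtube.com', 'i1.ytimg.com', 'i2.ytimg.com', 'i3.ytimg.com',
#  'i4.ytimg.com', 'i9.ytimg.com', 's.ytimg.com', 's1.ytimg.com', 's2.ytimg.com',
#  's3.ytimg.com', 's4.ytimg.com', 's9.ytimg.com']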

    _WAYBACK_BASE_URL = 'https://web.archive.org/web/%sif_/'
    _OLDEST_CAPTURE_DATE = 20050214000000
    _NEWEST_CAPTURE_DATE = 20500101000000
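A minimal sketch of how these constants are combined into a concrete Wayback Machine URL; the `if_` suffix asks the Wayback Machine for the raw page without its toolbar (the video id below is hypothetical):

wayback_base = 'https://web.archive.org/web/%sif_/'
print((wayback_base % 20050214000000) + 'http://www.youtube.com/watch?v=dQw4w9WgXcQ')
# https://web.archive.org/web/20050214000000if_/http://www.youtube.com/watch?v=dQw4w9WgXcQ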

    def _call_cdx_api(self, item_id, url, filters: list = None, collapse: list = None, query: dict = None, note='Downloading CDX API JSON'):
        # CDX docs: https://github.com/internetarchive/wayback/blob/master/wayback-cdx-server/README.md
        query = {
            'url': url,
            'output': 'json',
            'fl': 'original,mimetype,length,timestamp',
            'limit': 500,
            'filter': ['statuscode:200'] + (filters or []),
            'collapse': collapse or [],
            **(query or {})
        }
        res = self._download_json('https://web.archive.org/cdx/search/cdx', item_id, note, query=query)
        if isinstance(res, list) and len(res) >= 2:
            # format response to make it easier to use
            return list(dict(zip(res[0], v)) for v in res[1:])
        elif not isinstance(res, list) or len(res) != 0:
            self.report_warning('Error while parsing CDX API response' + bug_reports_message())
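A minimal sketch of the reshaping done above: the CDX server's JSON puts the field names in the first row, so zipping each later row against it yields one dict per capture (the response below is made up but CDX-shaped):

res = [
    ['original', 'mimetype', 'length', 'timestamp'],
    ['http://www.youtube.com/watch?v=abc', 'text/html', '12345', '20200701000000'],
    ['http://www.youtube.com/watch?v=abc', 'text/html', '12399', '20210101000000'],
]
rows = [dict(zip(res[0], v)) for v in res[1:]]
print(rows[0]['timestamp'])  # 20200701000000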

    def _extract_yt_initial_variable(self, webpage, regex, video_id, name):
        return self._parse_json(self._search_regex(
            (r'%s\s*%s' % (regex, self._YT_INITIAL_BOUNDARY_RE),
             regex), webpage, name, default='{}'), video_id, fatal=False)

    def _extract_webpage_title(self, webpage):
        page_title = self._html_search_regex(
            r'<title>([^<]*)</title>', webpage, 'title', default='')
        # YouTube video pages appear to always have either 'YouTube -' as prefix or '- YouTube' as suffix.
        return self._html_search_regex(
            r'(?:YouTube\s*-\s*(.*)$)|(?:(.*)\s*-\s*YouTube$)',
            page_title, 'title', default='')
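A quick stdlib-only illustration (an assumption: `_html_search_regex` behaves like `re.search` returning the first non-empty group) of how the regex above strips either form of YouTube title decoration:

import re

pattern = r'(?:YouTube\s*-\s*(.*)$)|(?:(.*)\s*-\s*YouTube$)'
for page_title in ('YouTube - Some Video', 'Some Video - YouTube'):
    m = re.search(pattern, page_title)
    print(next(g for g in m.groups() if g).strip())  # Some Video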

    def _extract_metadata(self, video_id, webpage):

        search_meta = ((lambda x: self._html_search_meta(x, webpage, default=None)) if webpage else (lambda x: None))
        player_response = self._extract_yt_initial_variable(
            webpage, self._YT_INITIAL_PLAYER_RESPONSE_RE, video_id, 'initial player response') or {}
        initial_data = self._extract_yt_initial_variable(
            webpage, self._YT_INITIAL_DATA_RE, video_id, 'initial data') or {}

        initial_data_video = traverse_obj(
            initial_data, ('contents', 'twoColumnWatchNextResults', 'results', 'results', 'contents', ..., 'videoPrimaryInfoRenderer'),
            expected_type=dict, get_all=False, default={})

        video_details = traverse_obj(
            player_response, 'videoDetails', expected_type=dict, get_all=False, default={})

        microformats = traverse_obj(
            player_response, ('microformat', 'playerMicroformatRenderer'), expected_type=dict, get_all=False, default={})

        video_title = (
            video_details.get('title')
            or YoutubeBaseInfoExtractor._get_text(microformats, 'title')
            or YoutubeBaseInfoExtractor._get_text(initial_data_video, 'title')
            or self._extract_webpage_title(webpage)
            or search_meta(['og:title', 'twitter:title', 'title']))

        channel_id = str_or_none(
            video_details.get('channelId')
            or microformats.get('externalChannelId')
            or search_meta('channelId')
            or self._search_regex(
                r'data-channel-external-id=(["\'])(?P<id>(?:(?!\1).)+)\1',  # @b45a9e6
                webpage, 'channel id', default=None, group='id'))
        channel_url = f'http://www.youtube.com/channel/{channel_id}' if channel_id else None

        duration = int_or_none(
            video_details.get('lengthSeconds')
            or microformats.get('lengthSeconds')
            or parse_duration(search_meta('duration')))
        description = (
            video_details.get('shortDescription')
            or YoutubeBaseInfoExtractor._get_text(microformats, 'description')
            or clean_html(get_element_by_id('eow-description', webpage))  # @9e6dd23
            or search_meta(['description', 'og:description', 'twitter:description']))

        uploader = video_details.get('author')

        # Uploader ID and URL
        uploader_mobj = re.search(
            r'<link itemprop="url" href="(?P<uploader_url>https?://www\.youtube\.com/(?:user|channel)/(?P<uploader_id>[^"]+))">',  # @fd05024
            webpage)
        if uploader_mobj is not None:
            uploader_id, uploader_url = uploader_mobj.group('uploader_id'), uploader_mobj.group('uploader_url')
        else:
            # @a6211d2
            uploader_url = url_or_none(microformats.get('ownerProfileUrl'))
            uploader_id = self._search_regex(
                r'(?:user|channel)/([^/]+)', uploader_url or '', 'uploader id', default=None)

        upload_date = unified_strdate(
            dict_get(microformats, ('uploadDate', 'publishDate'))
            or search_meta(['uploadDate', 'datePublished'])
            or self._search_regex(
                [r'(?s)id="eow-date.*?>(.*?)</span>',
                 r'(?:id="watch-uploader-info".*?>.*?|["\']simpleText["\']\s*:\s*["\'])(?:Published|Uploaded|Streamed live|Started) on (.+?)[<"\']'],  # @7998520
                webpage, 'upload date', default=None))

        return {
            'title': video_title,
            'description': description,
            'upload_date': upload_date,
            'uploader': uploader,
            'channel_id': channel_id,
            'channel_url': channel_url,
            'duration': duration,
            'uploader_url': uploader_url,
            'uploader_id': uploader_id,
        }

    def _extract_thumbnails(self, video_id):
        try_all = 'thumbnails' in self._configuration_arg('check_all')
        thumbnail_base_urls = ['http://{server}/vi{webp}/{video_id}'.format(
            webp='_webp' if ext == 'webp' else '', video_id=video_id, server=server)
            for server in (self._YT_ALL_THUMB_SERVERS if try_all else self._YT_DEFAULT_THUMB_SERVERS) for ext in (('jpg', 'webp') if try_all else ('jpg',))]

        thumbnails = []
        for url in thumbnail_base_urls:
            response = self._call_cdx_api(
                video_id, url, filters=['mimetype:image/(?:webp|jpeg)'],
                collapse=['urlkey'], query={'matchType': 'prefix'})
            if not response:
                continue
            thumbnails.extend(
                {
                    'url': (self._WAYBACK_BASE_URL % (int_or_none(thumbnail_dict.get('timestamp')) or self._OLDEST_CAPTURE_DATE)) + thumbnail_dict.get('original'),
                    'filesize': int_or_none(thumbnail_dict.get('length')),
                    'preference': int_or_none(thumbnail_dict.get('length'))
                } for thumbnail_dict in response)
            if not try_all:
                break

        self._remove_duplicate_formats(thumbnails)
        return thumbnails
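A sketch of the preference rule above: the CDX `length` field (size of the archived file) doubles as the thumbnail preference, so larger captures sort first (the capture data below is made up):

captures = [
    {'timestamp': '20200101000000', 'original': 'http://i.ytimg.com/vi/x/hqdefault.jpg', 'length': '10240'},
    {'timestamp': '20210101000000', 'original': 'http://i.ytimg.com/vi/x/hqdefault.jpg', 'length': '20480'},
]
thumbnails = [{
    'url': 'https://web.archive.org/web/%sif_/' % c['timestamp'] + c['original'],
    'filesize': int(c['length']),
    'preference': int(c['length']),
} for c in captures]
print(max(thumbnails, key=lambda t: t['preference'])['filesize'])  # 20480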

    def _get_capture_dates(self, video_id, url_date):
        capture_dates = []
        # Note: CDX API will not find watch pages with extra params in the url.
        response = self._call_cdx_api(
            video_id, f'https://www.youtube.com/watch?v={video_id}',
            filters=['mimetype:text/html'], collapse=['timestamp:6', 'digest'], query={'matchType': 'prefix'}) or []
        all_captures = sorted([int_or_none(r['timestamp']) for r in response if int_or_none(r['timestamp']) is not None])

        # Prefer the new polymer UI captures as we support extracting more metadata from them
        # WBM captures seem to all switch to this layout ~July 2020
        modern_captures = list(filter(lambda x: x >= 20200701000000, all_captures))
        if modern_captures:
            capture_dates.append(modern_captures[0])
        capture_dates.append(url_date)
        if all_captures:
            capture_dates.append(all_captures[0])

        if 'captures' in self._configuration_arg('check_all'):
            capture_dates.extend(modern_captures + all_captures)

        # Fallbacks if any of the above fail
        capture_dates.extend([self._OLDEST_CAPTURE_DATE, self._NEWEST_CAPTURE_DATE])
        return orderedSet(capture_dates)
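A sketch of the resulting priority order: first modern (post-July-2020) capture, then the date from the URL, then the oldest capture, then the hard fallbacks, deduplicated while preserving order (which is what `orderedSet` does; `dict.fromkeys` is the stdlib equivalent):

dates = [20200701000000, 20180803221945, 20160219000000,
         20050214000000, 20500101000000, 20180803221945]
print(list(dict.fromkeys(dates)))
# [20200701000000, 20180803221945, 20160219000000, 20050214000000, 20500101000000]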

    def _real_extract(self, url):
        url_date, video_id = self._match_valid_url(url).groups()

        # Use link translator mentioned in https://github.com/ytdl-org/youtube-dl/issues/13655
        urlh = None
        try:
            urlh = self._request_webpage(
                HEADRequest('https://web.archive.org/web/2oe_/http://wayback-fakeurl.archive.org/yt/%s' % video_id),
                video_id, note='Fetching archived video file url', expected_status=True)
        except ExtractorError as e:
            # HTTP Error 404 is expected if the video is not saved.
            if isinstance(e.cause, compat_HTTPError) and e.cause.code == 404:
                self.raise_no_formats(
                    'The requested video is not archived, indexed, or there is an issue with web.archive.org',
                    expected=True)
            else:
                raise

        capture_dates = self._get_capture_dates(video_id, int_or_none(url_date))
        self.write_debug('Captures to try: ' + ', '.join(str(i) for i in capture_dates if i is not None))
        info = {'id': video_id}
        for capture in capture_dates:
            if not capture:
                continue
            webpage = self._download_webpage(
                (self._WAYBACK_BASE_URL + 'http://www.youtube.com/watch?v=%s') % (capture, video_id),
                video_id=video_id, fatal=False, errnote='unable to download capture webpage (it may not be archived)',
                note='Downloading capture webpage')
            current_info = self._extract_metadata(video_id, webpage or '')
            # Try to avoid getting deleted video metadata
            if current_info.get('title'):
                info = merge_dicts(info, current_info)
                if 'captures' not in self._configuration_arg('check_all'):
                    break

        info['thumbnails'] = self._extract_thumbnails(video_id)

        if urlh:
            url = compat_urllib_parse_unquote(urlh.url)
            video_file_url_qs = parse_qs(url)
            # Attempt to recover any ext & format info from playback url & response headers
            format = {'url': url, 'filesize': int_or_none(urlh.headers.get('x-archive-orig-content-length'))}
            itag = try_get(video_file_url_qs, lambda x: x['itag'][0])
            if itag and itag in YoutubeIE._formats:
                format.update(YoutubeIE._formats[itag])
                format.update({'format_id': itag})
            else:
                mime = try_get(video_file_url_qs, lambda x: x['mime'][0])
                ext = (mimetype2ext(mime)
                       or urlhandle_detect_ext(urlh)
                       or mimetype2ext(urlh.headers.get('x-archive-guessed-content-type')))
                format.update({'ext': ext})
            info['formats'] = [format]
            if not info.get('duration'):
                info['duration'] = str_to_int(try_get(video_file_url_qs, lambda x: x['dur'][0]))

        if not info.get('title'):
            info['title'] = video_id
        return info
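A standalone, stdlib-only sketch of the format-recovery step above: the archived playback URL carries `itag`/`mime`/`dur` query parameters that can be mined for metadata (the URL below is a made-up example):

from urllib.parse import parse_qs, urlparse

playback_url = 'https://web.archive.org/web/2oe_/http://example.com/videoplayback?itag=22&mime=video/mp4&dur=135.2'
qs = parse_qs(urlparse(playback_url).query)
print(qs['itag'][0], qs['mime'][0].split('/')[-1], int(float(qs['dur'][0])))  # 22 mp4 135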

@@ -158,7 +158,7 @@ def _real_extract(self, url):

         return {
             'id': uuid,
-            'title': self._live_title(title) if is_live else title,
+            'title': title,
             'thumbnail': try_get(video, lambda x: x['promo_image']['url']),
             'description': try_get(video, lambda x: x['subheadlines']['basic']),
             'formats': formats,
@@ -280,7 +280,7 @@ def _real_extract(self, url):

         info.update({
             'id': video_id,
-            'title': self._live_title(title) if info.get('is_live') else title,
+            'title': title,
             'description': description,
             'thumbnail': thumbnail,
         })
@@ -388,7 +388,13 @@ def _real_extract(self, url):


 class ARDBetaMediathekIE(ARDMediathekBaseIE):
-    _VALID_URL = r'https://(?:(?:beta|www)\.)?ardmediathek\.de/(?P<client>[^/]+)/(?P<mode>player|live|video|sendung|sammlung)/(?P<display_id>(?:[^/]+/)*)(?P<video_id>[a-zA-Z0-9]+)'
+    _VALID_URL = r'''(?x)https://
+        (?:(?:beta|www)\.)?ardmediathek\.de/
+        (?:(?P<client>[^/]+)/)?
+        (?:player|live|video|(?P<playlist>sendung|sammlung))/
+        (?:(?P<display_id>[^?#]+)/)?
+        (?P<id>(?(playlist)|Y3JpZDovL)[a-zA-Z0-9]+)'''

     _TESTS = [{
         'url': 'https://www.ardmediathek.de/mdr/video/die-robuste-roswita/Y3JpZDovL21kci5kZS9iZWl0cmFnL2Ntcy84MWMxN2MzZC0wMjkxLTRmMzUtODk4ZS0wYzhlOWQxODE2NGI/',
         'md5': 'a1dc75a39c61601b980648f7c9f9f71d',
@@ -403,6 +409,18 @@ class ARDBetaMediathekIE(ARDMediathekBaseIE):
             'upload_date': '20200805',
             'ext': 'mp4',
         },
+        'skip': 'Error',
+    }, {
+        'url': 'https://www.ardmediathek.de/video/tagesschau-oder-tagesschau-20-00-uhr/das-erste/Y3JpZDovL2Rhc2Vyc3RlLmRlL3RhZ2Vzc2NoYXUvZmM4ZDUxMjgtOTE0ZC00Y2MzLTgzNzAtNDZkNGNiZWJkOTll',
+        'md5': 'f1837e563323b8a642a8ddeff0131f51',
+        'info_dict': {
+            'id': '10049223',
+            'ext': 'mp4',
+            'title': 'tagesschau, 20:00 Uhr',
+            'timestamp': 1636398000,
+            'description': 'md5:39578c7b96c9fe50afdf5674ad985e6b',
+            'upload_date': '20211108',
+        },
     }, {
         'url': 'https://beta.ardmediathek.de/ard/video/Y3JpZDovL2Rhc2Vyc3RlLmRlL3RhdG9ydC9mYmM4NGM1NC0xNzU4LTRmZGYtYWFhZS0wYzcyZTIxNGEyMDE',
         'only_matching': True,
@@ -426,6 +444,12 @@ class ARDBetaMediathekIE(ARDMediathekBaseIE):
         # playlist of type 'sammlung'
         'url': 'https://www.ardmediathek.de/ard/sammlung/team-muenster/5JpTzLSbWUAK8184IOvEir/',
         'only_matching': True,
+    }, {
+        'url': 'https://www.ardmediathek.de/video/coronavirus-update-ndr-info/astrazeneca-kurz-lockdown-und-pims-syndrom-81/ndr/Y3JpZDovL25kci5kZS84NzE0M2FjNi0wMWEwLTQ5ODEtOTE5NS1mOGZhNzdhOTFmOTI/',
+        'only_matching': True,
+    }, {
+        'url': 'https://www.ardmediathek.de/ard/player/Y3JpZDovL3dkci5kZS9CZWl0cmFnLWQ2NDJjYWEzLTMwZWYtNGI4NS1iMTI2LTU1N2UxYTcxOGIzOQ/tatort-duo-koeln-leipzig-ihr-kinderlein-kommet',
+        'only_matching': True,
     }]

     def _ARD_load_playlist_snipped(self, playlist_id, display_id, client, mode, pageNumber):
@@ -525,20 +549,12 @@ def _ARD_extract_playlist(self, url, playlist_id, display_id, client, mode):
         return self.playlist_result(entries, playlist_title=display_id)

     def _real_extract(self, url):
-        mobj = self._match_valid_url(url)
-        video_id = mobj.group('video_id')
-        display_id = mobj.group('display_id')
-        if display_id:
-            display_id = display_id.rstrip('/')
-        if not display_id:
-            display_id = video_id
+        video_id, display_id, playlist_type, client = self._match_valid_url(url).group(
+            'id', 'display_id', 'playlist', 'client')
+        display_id, client = display_id or video_id, client or 'ard'

-        if mobj.group('mode') in ('sendung', 'sammlung'):
-            # this is a playlist-URL
-            return self._ARD_extract_playlist(
-                url, video_id, display_id,
-                mobj.group('client'),
-                mobj.group('mode'))
+        if playlist_type:
+            return self._ARD_extract_playlist(url, video_id, display_id, client, playlist_type)

         player_page = self._download_json(
             'https://api.ardmediathek.de/public-gateway',
@@ -574,7 +590,7 @@ def _real_extract(self, url):
                 }
               }
             }
-          }''' % (mobj.group('client'), video_id),
+          }''' % (client, video_id),
         }).encode(), headers={
             'Content-Type': 'application/json'
         })['data']['playerPage']
@@ -14,7 +14,7 @@


 class AudiomackIE(InfoExtractor):
-    _VALID_URL = r'https?://(?:www\.)?audiomack\.com/song/(?P<id>[\w/-]+)'
+    _VALID_URL = r'https?://(?:www\.)?audiomack\.com/(?:song/|(?=.+/song/))(?P<id>[\w/-]+)'
     IE_NAME = 'audiomack'
     _TESTS = [
         # hosted on audiomack
@@ -39,15 +39,16 @@ class AudiomackIE(InfoExtractor):
                 'title': 'Black Mamba Freestyle [Prod. By Danny Wolf]',
                 'uploader': 'ILOVEMAKONNEN',
                 'upload_date': '20160414',
-            }
+            },
+            'skip': 'Song has been removed from the site',
         },
     ]

     def _real_extract(self, url):
-        # URLs end with [uploader name]/[uploader title]
+        # URLs end with [uploader name]/song/[uploader title]
         # this title is whatever the user types in, and is rarely
         # the proper song title.  Real metadata is in the api response
-        album_url_tag = self._match_id(url)
+        album_url_tag = self._match_id(url).replace('/song/', '/')

         # Request the extended version of the api for extra fields like artist and title
         api_response = self._download_json(
@@ -73,13 +74,13 @@ def _real_extract(self, url):


 class AudiomackAlbumIE(InfoExtractor):
-    _VALID_URL = r'https?://(?:www\.)?audiomack\.com/album/(?P<id>[\w/-]+)'
+    _VALID_URL = r'https?://(?:www\.)?audiomack\.com/(?:album/|(?=.+/album/))(?P<id>[\w/-]+)'
     IE_NAME = 'audiomack:album'
     _TESTS = [
         # Standard album playlist
         {
             'url': 'http://www.audiomack.com/album/flytunezcom/tha-tour-part-2-mixtape',
-            'playlist_count': 15,
+            'playlist_count': 11,
             'info_dict':
             {
                 'id': '812251',
@@ -95,24 +96,27 @@ class AudiomackAlbumIE(InfoExtractor):
             },
             'playlist': [{
                 'info_dict': {
-                    'title': 'PPP (Pistol P Project) - 9. Heaven or Hell (CHIMACA) ft Zuse (prod by DJ FU)',
-                    'id': '837577',
+                    'title': 'PPP (Pistol P Project) - 8. Real (prod by SYK SENSE )',
+                    'id': '837576',
                     'ext': 'mp3',
                     'uploader': 'Lil Herb a.k.a. G Herbo',
                 }
+            }, {
+                'info_dict': {
+                    'title': 'PPP (Pistol P Project) - 10. 4 Minutes Of Hell Part 4 (prod by DY OF 808 MAFIA)',
+                    'id': '837580',
+                    'ext': 'mp3',
+                    'uploader': 'Lil Herb a.k.a. G Herbo',
+                }
             }],
             'params': {
                 'playliststart': 9,
                 'playlistend': 9,
             }
         }
     ]

     def _real_extract(self, url):
-        # URLs end with [uploader name]/[uploader title]
+        # URLs end with [uploader name]/album/[uploader title]
         # this title is whatever the user types in, and is rarely
         # the proper song title.  Real metadata is in the api response
-        album_url_tag = self._match_id(url)
+        album_url_tag = self._match_id(url).replace('/album/', '/')
         result = {'_type': 'playlist', 'entries': []}
         # There is no one endpoint for album metadata - instead it is included/repeated in each song's metadata
         # Therefore we don't know how many songs the album has and must infi-loop until failure
@@ -134,7 +138,7 @@ def _real_extract(self, url):
         # Pull out the album metadata and add to result (if it exists)
         for resultkey, apikey in [('id', 'album_id'), ('title', 'album_title')]:
             if apikey in api_response and resultkey not in result:
-                result[resultkey] = api_response[apikey]
+                result[resultkey] = compat_str(api_response[apikey])
         song_id = url_basename(api_response['url']).rpartition('.')[0]
         result['entries'].append({
             'id': compat_str(api_response.get('id', song_id)),
@@ -41,7 +41,7 @@ def _parse_video_data(self, video_data, video_id, is_live):

         return {
             'id': video_id,
-            'title': self._live_title(title) if is_live else title,
+            'title': title,
             'description': video_data.get('description_en') or video_data.get('description_ar'),
             'thumbnail': 'http://admin.mangomolo.com/analytics/%s' % img if img else None,
             'duration': int_or_none(video_data.get('duration')),
@@ -451,9 +451,10 @@ def _download_playlist(self, playlist_id):
             playlist = self._download_json(
                 'http://www.bbc.co.uk/programmes/%s/playlist.json' % playlist_id,
                 playlist_id, 'Downloading playlist JSON')
+            formats = []
+            subtitles = {}

-            version = playlist.get('defaultAvailableVersion')
-            if version:
+            for version in playlist.get('allAvailableVersions', []):
                 smp_config = version['smpConfig']
                 title = smp_config['title']
                 description = smp_config['summary']
@@ -463,8 +464,17 @@ def _download_playlist(self, playlist_id):
                     continue
                 programme_id = item.get('vpid')
                 duration = int_or_none(item.get('duration'))
-                formats, subtitles = self._download_media_selector(programme_id)
-            return programme_id, title, description, duration, formats, subtitles
+                version_formats, version_subtitles = self._download_media_selector(programme_id)
+                types = version['types']
+                for f in version_formats:
+                    f['format_note'] = ', '.join(types)
+                    if any('AudioDescribed' in x for x in types):
+                        f['language_preference'] = -10
+                formats += version_formats
+                for tag, subformats in (version_subtitles or {}).items():
+                    subtitles.setdefault(tag, []).extend(subformats)
+
+            return programme_id, title, description, duration, formats, subtitles
         except ExtractorError as ee:
             if not (isinstance(ee.cause, compat_HTTPError) and ee.cause.code == 404):
                 raise
@@ -346,7 +346,8 @@ def _real_extract(self, url):
     def _extract_anthology_entries(self, bv_id, video_id, webpage):
         title = self._html_search_regex(
             (r'<h1[^>]+\btitle=(["\'])(?P<title>(?:(?!\1).)+)\1',
-             r'(?s)<h1[^>]*>(?P<title>.+?)</h1>'), webpage, 'title',
+             r'(?s)<h1[^>]*>(?P<title>.+?)</h1>',
+             r'<title>(?P<title>.+?)</title>'), webpage, 'title',
             group='title')
         json_data = self._download_json(
             f'https://api.bilibili.com/x/player/pagelist?bvid={bv_id}&jsonp=jsonp',
@@ -51,7 +51,7 @@ def _real_extract(self, url):

         return {
             'id': username,
-            'title': self._live_title(channel['data']['title']),
+            'title': channel['data']['title'],
             'uploader': username,
             'uploader_id': username,
             'formats': formats,
yt_dlp/extractor/blogger.py (new file, 54 lines)
@@ -0,0 +1,54 @@
# coding: utf-8
from __future__ import unicode_literals

import re

from ..utils import (
    mimetype2ext,
    parse_duration,
    parse_qs,
    str_or_none,
    traverse_obj,
)
from .common import InfoExtractor


class BloggerIE(InfoExtractor):
    IE_NAME = 'blogger.com'
    _VALID_URL = r'https?://(?:www\.)?blogger\.com/video\.g\?token=(?P<id>.+)'
    _VALID_EMBED = r'''<iframe[^>]+src=["']((?:https?:)?//(?:www\.)?blogger\.com/video\.g\?token=[^"']+)["']'''
    _TESTS = [{
        'url': 'https://www.blogger.com/video.g?token=AD6v5dzEe9hfcARr5Hlq1WTkYy6t-fXH3BBahVhGvVHe5szdEUBEloSEDSTA8-b111089KbfWuBvTN7fnbxMtymsHhXAXwVvyzHH4Qch2cfLQdGxKQrrEuFpC1amSl_9GuLWODjPgw',
        'md5': 'f1bc19b6ea1b0fd1d81e84ca9ec467ac',
        'info_dict': {
            'id': 'BLOGGER-video-3c740e3a49197e16-796',
            'title': 'BLOGGER-video-3c740e3a49197e16-796',
            'ext': 'mp4',
            'thumbnail': r're:^https?://.*',
            'duration': 76.068,
        }
    }]

    @staticmethod
    def _extract_urls(webpage):
        return re.findall(BloggerIE._VALID_EMBED, webpage)

    def _real_extract(self, url):
        token_id = self._match_id(url)
        webpage = self._download_webpage(url, token_id)
        data_json = self._search_regex(r'var\s+VIDEO_CONFIG\s*=\s*(\{.*)', webpage, 'JSON data')
        data = self._parse_json(data_json.encode('utf-8').decode('unicode_escape'), token_id)
        streams = data['streams']
        formats = [{
            'ext': mimetype2ext(traverse_obj(parse_qs(stream['play_url']), ('mime', 0))),
            'url': stream['play_url'],
            'format_id': str_or_none(stream.get('format_id')),
        } for stream in streams]

        return {
            'id': data.get('iframe_id', token_id),
            'title': data.get('iframe_id', token_id),
            'formats': formats,
            'thumbnail': data.get('thumbnail'),
            'duration': parse_duration(traverse_obj(parse_qs(streams[0]['play_url']), ('dur', 0))),
        }
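A sketch of why `VIDEO_CONFIG` is round-tripped through `unicode_escape` above: the embedded config is a JavaScript string and may contain JS-only escapes such as `\x26`, which `json.loads` alone would reject (the config below is a made-up example):

import json

raw = '{"play_url":"https://example.com/video?a=1\\x26b=2","iframe_id":"BLOGGER-video-abc-123"}'
data = json.loads(raw.encode('utf-8').decode('unicode_escape'))
print(data['play_url'])  # https://example.com/video?a=1&b=2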

@@ -49,7 +49,7 @@ def _real_extract(self, url):

         return {
             'id': channel_id,
-            'title': self._live_title(uploader or uploader_id),
+            'title': uploader or uploader_id,
             'uploader': uploader,
             'uploader_id': uploader_id,
             'like_count': like_count,
yt_dlp/extractor/breitbart.py (new file, 39 lines)
@@ -0,0 +1,39 @@
from __future__ import unicode_literals

from .common import InfoExtractor


class BreitBartIE(InfoExtractor):
    _VALID_URL = r'https?:\/\/(?:www\.)breitbart.com/videos/v/(?P<id>[^/]+)'
    _TESTS = [{
        'url': 'https://www.breitbart.com/videos/v/5cOz1yup/?pl=Ij6NDOji',
        'md5': '0aa6d1d6e183ac5ca09207fe49f17ade',
        'info_dict': {
            'id': '5cOz1yup',
            'ext': 'mp4',
            'title': 'Watch \u2013 Clyburn: Statues in Congress Have to Go Because they Are Honoring Slavery',
            'description': 'md5:bac35eb0256d1cb17f517f54c79404d5',
            'thumbnail': 'https://cdn.jwplayer.com/thumbs/5cOz1yup-1920.jpg',
            'age_limit': 0,
        }
    }, {
        'url': 'https://www.breitbart.com/videos/v/eaiZjVOn/',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)

        formats = self._extract_m3u8_formats(f'https://cdn.jwplayer.com/manifests/{video_id}.m3u8', video_id, ext='mp4')
        self._sort_formats(formats)
        return {
            'id': video_id,
            'title': self._og_search_title(
                webpage, default=None) or self._html_search_regex(
                r'(?s)<title>(.*?)</title>', webpage, 'video title'),
            'description': self._og_search_description(webpage),
            'thumbnail': self._og_search_thumbnail(webpage),
            'age_limit': self._rta_search(webpage),
            'formats': formats
        }
@@ -16,6 +16,7 @@
 )
 from ..utils import (
     clean_html,
+    dict_get,
     extract_attributes,
     ExtractorError,
     find_xpath_attr,
@@ -471,32 +472,22 @@ def _extract_urls(ie, webpage):
     def _parse_brightcove_metadata(self, json_data, video_id, headers={}):
         title = json_data['name'].strip()

-        num_drm_sources = 0
         formats, subtitles = [], {}
         sources = json_data.get('sources') or []
         for source in sources:
             container = source.get('container')
             ext = mimetype2ext(source.get('type'))
             src = source.get('src')
-            skip_unplayable = not self.get_param('allow_unplayable_formats')
-            # https://support.brightcove.com/playback-api-video-fields-reference#key_systems_object
-            if skip_unplayable and (container == 'WVM' or source.get('key_systems')):
-                num_drm_sources += 1
-                continue
-            elif ext == 'ism' and skip_unplayable:
-                continue
-            elif ext == 'm3u8' or container == 'M2TS':
+            if ext == 'm3u8' or container == 'M2TS':
                 if not src:
                     continue
-                f, subs = self._extract_m3u8_formats_and_subtitles(
+                fmts, subs = self._extract_m3u8_formats_and_subtitles(
                     src, video_id, 'mp4', 'm3u8_native', m3u8_id='hls', fatal=False)
-                formats.extend(f)
                 subtitles = self._merge_subtitles(subtitles, subs)
             elif ext == 'mpd':
                 if not src:
                     continue
-                f, subs = self._extract_mpd_formats_and_subtitles(src, video_id, 'dash', fatal=False)
-                formats.extend(f)
+                fmts, subs = self._extract_mpd_formats_and_subtitles(src, video_id, 'dash', fatal=False)
                 subtitles = self._merge_subtitles(subtitles, subs)
             else:
                 streaming_src = source.get('streaming_src')
@@ -543,7 +534,13 @@ def build_format_id(kind):
                     'play_path': stream_name,
                     'format_id': build_format_id('rtmp'),
                 })
-                formats.append(f)
+                fmts = [f]
+
+            # https://support.brightcove.com/playback-api-video-fields-reference#key_systems_object
+            if container == 'WVM' or source.get('key_systems') or ext == 'ism':
+                for f in fmts:
+                    f['has_drm'] = True
+            formats.extend(fmts)

         if not formats:
             errors = json_data.get('errors')
@@ -551,9 +548,6 @@ def build_format_id(kind):
             error = errors[0]
             self.raise_no_formats(
                 error.get('message') or error.get('error_subcode') or error['error_code'], expected=True)
-        elif (not self.get_param('allow_unplayable_formats')
-                and sources and num_drm_sources == len(sources)):
-            self.report_drm(video_id)

         self._sort_formats(formats)

@@ -577,11 +571,19 @@ def build_format_id(kind):
         if duration is not None and duration <= 0:
             is_live = True

+        common_res = [(160, 90), (320, 180), (480, 720), (640, 360), (768, 432), (1024, 576), (1280, 720), (1366, 768), (1920, 1080)]
+        thumb_base_url = dict_get(json_data, ('poster', 'thumbnail'))
+        thumbnails = [{
+            'url': re.sub(r'\d+x\d+', f'{w}x{h}', thumb_base_url),
+            'width': w,
+            'height': h,
+        } for w, h in common_res] if thumb_base_url else None
+
         return {
             'id': video_id,
-            'title': self._live_title(title) if is_live else title,
+            'title': title,
             'description': clean_html(json_data.get('description')),
-            'thumbnail': json_data.get('thumbnail') or json_data.get('poster'),
+            'thumbnails': thumbnails,
             'duration': duration,
             'timestamp': parse_iso8601(json_data.get('published_at')),
             'uploader_id': json_data.get('account_id'),
yt_dlp/extractor/cableav.py (new file, 34 lines)
@@ -0,0 +1,34 @@
# coding: utf-8
from .common import InfoExtractor


class CableAVIE(InfoExtractor):
    _VALID_URL = r'https://cableav\.tv/(?P<id>[a-zA-Z0-9]+)'
    _TESTS = [{
        'url': 'https://cableav.tv/lS4iR9lWjN8/',
        'md5': '7e3fe5e49d61c4233b7f5b0f69b15e18',
        'info_dict': {
            'id': 'lS4iR9lWjN8',
            'ext': 'mp4',
            'title': '國產麻豆AV 叮叮映畫 DDF001 情欲小說家 - CableAV',
            'description': '國產AV 480p, 720p 国产麻豆AV 叮叮映画 DDF001 情欲小说家',
            'thumbnail': r're:^https?://.*\.jpg$',
        }
    }]

    def _real_extract(self, url):
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)

        video_url = self._og_search_video_url(webpage, secure=False)

        formats = self._extract_m3u8_formats(video_url, video_id, 'mp4')
        self._sort_formats(formats)

        return {
            'id': video_id,
            'title': self._og_search_title(webpage),
            'description': self._og_search_description(webpage),
            'thumbnail': self._og_search_thumbnail(webpage),
            'formats': formats,
        }
@@ -25,7 +25,7 @@ def _real_extract(self, url):

         return {
             'id': channel_id,
-            'title': self._live_title(channel_id),
+            'title': channel_id,
             'is_live': True,
             'age_limit': 18,
             'formats': formats,

@@ -91,7 +91,7 @@ def _real_extract(self, url):

         return {
             'id': user_id,
-            'title': self._live_title(user_id),
+            'title': user_id,
             'is_live': True,
             'formats': formats,
             'age_limit': 18
yt_dlp/extractor/canalalpha.py (new file, 98 lines)
@@ -0,0 +1,98 @@
# coding: utf-8
from __future__ import unicode_literals

from .common import InfoExtractor
from ..utils import (
    clean_html,
    dict_get,
    try_get,
    unified_strdate,
)


class CanalAlphaIE(InfoExtractor):
    _VALID_URL = r'https?://(?:www\.)?canalalpha\.ch/play/[^/]+/[^/]+/(?P<id>\d+)/?.*'

    _TESTS = [{
        'url': 'https://www.canalalpha.ch/play/le-journal/episode/24520/jeudi-28-octobre-2021',
        'info_dict': {
            'id': '24520',
            'ext': 'mp4',
            'title': 'Jeudi 28 octobre 2021',
            'description': 'md5:d30c6c3e53f8ad40d405379601973b30',
            'thumbnail': 'https://static.canalalpha.ch/poster/journal/journal_20211028.jpg',
            'upload_date': '20211028',
            'duration': 1125,
        },
        'params': {'skip_download': True}
    }, {
        'url': 'https://www.canalalpha.ch/play/le-journal/topic/24512/la-poste-fait-de-neuchatel-un-pole-cryptographique',
        'info_dict': {
            'id': '24512',
            'ext': 'mp4',
            'title': 'La Poste fait de Neuchâtel un pôle cryptographique',
            'description': 'md5:4ba63ae78a0974d1a53d6703b6e1dedf',
            'thumbnail': 'https://static.canalalpha.ch/poster/news/news_39712.jpg',
            'upload_date': '20211028',
            'duration': 138,
        },
        'params': {'skip_download': True}
    }, {
        'url': 'https://www.canalalpha.ch/play/eureka/episode/24484/ces-innovations-qui-veulent-rendre-lagriculture-plus-durable',
        'info_dict': {
            'id': '24484',
            'ext': 'mp4',
            'title': 'Ces innovations qui veulent rendre l’agriculture plus durable',
            'description': 'md5:3de3f151180684621e85be7c10e4e613',
            'thumbnail': 'https://static.canalalpha.ch/poster/magazine/magazine_10236.jpg',
            'upload_date': '20211026',
            'duration': 360,
        },
        'params': {'skip_download': True}
    }, {
        'url': 'https://www.canalalpha.ch/play/avec-le-temps/episode/23516/redonner-de-leclat-grace-au-polissage',
        'info_dict': {
            'id': '23516',
            'ext': 'mp4',
            'title': 'Redonner de l\'éclat grâce au polissage',
            'description': 'md5:0d8fbcda1a5a4d6f6daa3165402177e1',
            'thumbnail': 'https://static.canalalpha.ch/poster/magazine/magazine_9990.png',
            'upload_date': '20210726',
            'duration': 360,
        },
        'params': {'skip_download': True}
    }]

    def _real_extract(self, url):
        id = self._match_id(url)
        webpage = self._download_webpage(url, id)
        data_json = self._parse_json(self._search_regex(
            r'window\.__SERVER_STATE__\s?=\s?({(?:(?!};)[^"]|"([^"]|\\")*")+})\s?;',
            webpage, 'data_json'), id)['1']['data']['data']
        manifests = try_get(data_json, lambda x: x['video']['manifests'], expected_type=dict) or {}
        subtitles = {}
        formats = [{
            'url': video['$url'],
            'ext': 'mp4',
            'width': try_get(video, lambda x: x['res']['width'], expected_type=int),
            'height': try_get(video, lambda x: x['res']['height'], expected_type=int),
        } for video in try_get(data_json, lambda x: x['video']['mp4'], expected_type=list) or [] if video.get('$url')]
        if manifests.get('hls'):
            m3u8_frmts, m3u8_subs = self._parse_m3u8_formats_and_subtitles(manifests['hls'], id)
            formats.extend(m3u8_frmts)
            subtitles = self._merge_subtitles(subtitles, m3u8_subs)
        if manifests.get('dash'):
            dash_frmts, dash_subs = self._parse_mpd_formats_and_subtitles(manifests['dash'], id)
            formats.extend(dash_frmts)
            subtitles = self._merge_subtitles(subtitles, dash_subs)
        self._sort_formats(formats)
        return {
            'id': id,
            'title': data_json.get('title').strip(),
            'description': clean_html(dict_get(data_json, ('longDesc', 'shortDesc'))),
            'thumbnail': data_json.get('poster'),
            'upload_date': unified_strdate(dict_get(data_json, ('webPublishAt', 'featuredAt', 'diffusionDate'))),
            'duration': try_get(data_json, lambda x: x['video']['duration'], expected_type=int),
            'formats': formats,
            'subtitles': subtitles,
        }
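A sketch of the `__SERVER_STATE__` extraction above on a made-up page snippet; the regex tolerates quoted strings (including escaped quotes) inside the object literal:

import json
import re

webpage = 'window.__SERVER_STATE__ = {"1":{"data":{"data":{"title":"Demo "}}}};'
m = re.search(r'window\.__SERVER_STATE__\s?=\s?({(?:(?!};)[^"]|"([^"]|\\")*")+})\s?;', webpage)
print(json.loads(m.group(1))['1']['data']['data']['title'].strip())  # Demo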

@@ -11,11 +11,13 @@
     compat_str,
 )
 from ..utils import (
     int_or_none,
+    join_nonempty,
     js_to_json,
-    smuggle_url,
-    try_get,
     orderedSet,
+    smuggle_url,
     strip_or_none,
+    try_get,
     ExtractorError,
 )
@@ -313,6 +315,37 @@ def _real_initialize(self):
             return
         self._claims_token = self._downloader.cache.load(self._NETRC_MACHINE, 'claims_token')

+    def _find_secret_formats(self, formats, video_id):
+        """ Find a valid video url and convert it to the secret variant """
+        base_format = next((f for f in formats if f.get('vcodec') != 'none'), None)
+        if not base_format:
+            return
+
+        base_url = re.sub(r'(Manifest\(.*?),filter=[\w-]+(.*?\))', r'\1\2', base_format['url'])
+        url = re.sub(r'(Manifest\(.*?),format=[\w-]+(.*?\))', r'\1\2', base_url)
+
+        secret_xml = self._download_xml(url, video_id, note='Downloading secret XML', fatal=False)
+        if not secret_xml:
+            return
+
+        for child in secret_xml:
+            if child.attrib.get('Type') != 'video':
+                continue
+            for video_quality in child:
+                bitrate = int_or_none(video_quality.attrib.get('Bitrate'))
+                if not bitrate or 'Index' not in video_quality.attrib:
+                    continue
+                height = int_or_none(video_quality.attrib.get('MaxHeight'))
+
+                yield {
+                    **base_format,
+                    'format_id': join_nonempty('sec', height),
+                    'url': re.sub(r'(QualityLevels\()\d+(\))', fr'\1{bitrate}\2', base_url),
+                    'width': int_or_none(video_quality.attrib.get('MaxWidth')),
+                    'tbr': bitrate / 1000.0,
+                    'height': height,
+                }
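A sketch of the URL rewriting done above on a made-up Smooth Streaming URL: the `filter=` and `format=` arguments of `Manifest()` are stripped, then `QualityLevels()` is re-targeted per bitrate (`\g<1>` is used below because the bitrate starts with a digit):

import re

url = 'https://example.com/video.ism/QualityLevels(128000)/Manifest(video,format=m3u8-aapl,filter=desktop)'
base = re.sub(r'(Manifest\(.*?),filter=[\w-]+(.*?\))', r'\1\2', url)
base = re.sub(r'(Manifest\(.*?),format=[\w-]+(.*?\))', r'\1\2', base)
print(re.sub(r'(QualityLevels\()\d+(\))', r'\g<1>996000\g<2>', base))
# https://example.com/video.ism/QualityLevels(996000)/Manifest(video)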

     def _real_extract(self, url):
         video_id = self._match_id(url)
         video_info = self._download_json('https://services.radio-canada.ca/ott/cbc-api/v2/assets/' + video_id, video_id)
@@ -335,6 +368,7 @@ def _real_extract(self, url):

         formats = self._extract_m3u8_formats(m3u8_url, video_id, m3u8_id='hls')
         self._remove_duplicate_formats(formats)
+        formats.extend(self._find_secret_formats(formats, video_id))

         for format in formats:
             if format.get('vcodec') == 'none':
@@ -390,7 +424,8 @@ def _real_extract(self, url):
         show = match.group('show')
         show_info = self._download_json(self._API_BASE + show, season_id)
         season = int(match.group('season'))
-        season_info = try_get(show_info, lambda x: x['seasons'][season - 1])
+
+        season_info = next((s for s in show_info['seasons'] if s.get('season') == season), None)

         if season_info is None:
             raise ExtractorError(f'Couldn\'t find season {season} of {show}')
@@ -12,8 +12,7 @@
     ExtractorError,
     float_or_none,
     sanitized_Request,
-    unescapeHTML,
-    update_url_query,
+    traverse_obj,
     urlencode_postdata,
     USER_AGENTS,
 )
@@ -99,11 +98,13 @@ def _real_extract(self, url):
         playlist_description = playlist_description.replace('\xa0', ' ')

     if parsed_url.path.startswith('/porady/'):
-        refer_url = update_url_query(unescapeHTML(self._search_regex(
-            (r'<span[^>]*\bdata-url=(["\'])(?P<url>(?:(?!\1).)+)\1',
-             r'<iframe[^>]+\bsrc=(["\'])(?P<url>(?:https?:)?//(?:www\.)?ceskatelevize\.cz/ivysilani/embed/iFramePlayer\.php.*?)\1'),
-            webpage, 'iframe player url', group='url')), query={'autoStart': 'true'})
-        webpage = self._download_webpage(refer_url, playlist_id)
+        next_data = self._search_nextjs_data(webpage, playlist_id)
+        idec = traverse_obj(next_data, ('props', 'pageProps', 'data', ('show', 'mediaMeta'), 'idec'), get_all=False)
+        if not idec:
+            raise ExtractorError('Failed to find IDEC id')
+        iframe_hash = self._download_webpage('https://www.ceskatelevize.cz/v-api/iframe-hash/', playlist_id)
+        webpage = self._download_webpage('https://www.ceskatelevize.cz/ivysilani/embed/iFramePlayer.php', playlist_id,
+                                         query={'hash': iframe_hash, 'origin': 'iVysilani', 'autoStart': 'true', 'IDEC': idec})

     NOT_AVAILABLE_STRING = 'This content is not available at your territory due to limited copyright.'
     if '%s</p>' % NOT_AVAILABLE_STRING in webpage:
@@ -211,8 +212,6 @@ def _real_extract(self, url):

     if playlist_len == 1:
         final_title = playlist_title or title
-        if is_live:
-            final_title = self._live_title(final_title)
     else:
         final_title = '%s (%s)' % (playlist_title, title)
@@ -101,7 +101,7 @@ def _real_extract(self, url):

     return {
         'id': video_id,
-        'title': self._live_title(video_id),
+        'title': video_id,
         'thumbnail': 'https://roomimg.stream.highwebmedia.com/ri/%s.jpg' % video_id,
         'age_limit': self._rta_search(webpage),
         'is_live': True,
@@ -67,7 +67,7 @@ def _get_post(self, id, post_data):


 class ChingariIE(ChingariBaseIE):
-    _VALID_URL = r'(?:https?://)(?:www\.)?chingari\.io/share/post\?id=(?P<id>[^&/#?]+)'
+    _VALID_URL = r'https?://(?:www\.)?chingari\.io/share/post\?id=(?P<id>[^&/#?]+)'
     _TESTS = [{
         'url': 'https://chingari.io/share/post?id=612f8f4ce1dc57090e8a7beb',
         'info_dict': {
@@ -102,7 +102,7 @@ def _real_extract(self, url):


 class ChingariUserIE(ChingariBaseIE):
-    _VALID_URL = r'(?:https?://)(?:www\.)?chingari\.io/(?!share/post)(?P<id>[^/?]+)'
+    _VALID_URL = r'https?://(?:www\.)?chingari\.io/(?!share/post)(?P<id>[^/?]+)'
     _TESTS = [{
         'url': 'https://chingari.io/dada1023',
         'playlist_mincount': 3,
@@ -2,7 +2,7 @@
 from __future__ import unicode_literals

 import base64
-import datetime
+import collections
 import hashlib
 import itertools
 import json
@@ -163,9 +163,8 @@ class InfoExtractor(object):
     * filesize_approx    An estimate for the number of bytes
     * player_url         SWF Player URL (used for rtmpdump).
     * protocol           The protocol that will be used for the actual
-                         download, lower-case.
-                         "http", "https", "rtsp", "rtmp", "rtmp_ffmpeg", "rtmpe",
-                         "m3u8", "m3u8_native" or "http_dash_segments".
+                         download, lower-case. One of "http", "https" or
+                         one of the protocols defined in downloader.PROTOCOL_MAP
     * fragment_base_url
                          Base URL for fragments. Each fragment's path
                          value (if present) will be relative to
@@ -181,6 +180,8 @@ class InfoExtractor(object):
                                          fragment_base_url
                          * "duration" (optional, int or float)
                          * "filesize" (optional, int)
+    * is_from_start      Is a live format that can be downloaded
+                         from the start. Boolean
     * preference         Order number of this format. If this field is
                          present and not None, the formats get sorted
                          by this field, regardless of all other values.
@@ -342,6 +343,7 @@ class InfoExtractor(object):
     series, programme or podcast:

     series: Title of the series or programme the video episode belongs to.
+    series_id: Id of the series or programme the video episode belongs to, as a unicode string.
     season: Title of the season the video episode belongs to.
     season_number: Number of the season the video episode belongs to, as an integer.
     season_id: Id of the season the video episode belongs to, as a unicode string.
@@ -464,6 +466,8 @@ def _match_valid_url(cls, url):
         # we have cached the regexp for *this* class, whereas getattr would also
         # match the superclass
         if '_VALID_URL_RE' not in cls.__dict__:
+            if '_VALID_URL' not in cls.__dict__:
+                cls._VALID_URL = cls._make_valid_url()
             cls._VALID_URL_RE = re.compile(cls._VALID_URL)
         return cls._VALID_URL_RE.match(url)

@@ -612,7 +616,7 @@ def extract(self, url):
         kwargs = {
             'video_id': e.video_id or self.get_temp_id(url),
             'ie': self.IE_NAME,
-            'tb': e.traceback,
+            'tb': e.traceback or sys.exc_info()[2],
             'expected': e.expected,
             'cause': e.cause
         }
@@ -1077,7 +1081,8 @@ def report_login(self):
     def raise_login_required(
             self, msg='This video is only available for registered users',
             metadata_available=False, method='any'):
-        if metadata_available and self.get_param('ignore_no_formats_error'):
+        if metadata_available and (
+                self.get_param('ignore_no_formats_error') or self.get_param('wait_for_video')):
             self.report_warning(msg)
         if method is not None:
             msg = '%s. %s' % (msg, self._LOGIN_HINTS[method])
@@ -1086,13 +1091,15 @@ def raise_login_required(
     def raise_geo_restricted(
             self, msg='This video is not available from your location due to geo restriction',
             countries=None, metadata_available=False):
-        if metadata_available and self.get_param('ignore_no_formats_error'):
+        if metadata_available and (
+                self.get_param('ignore_no_formats_error') or self.get_param('wait_for_video')):
             self.report_warning(msg)
         else:
             raise GeoRestrictedError(msg, countries=countries)

     def raise_no_formats(self, msg, expected=False, video_id=None):
-        if expected and self.get_param('ignore_no_formats_error'):
+        if expected and (
+                self.get_param('ignore_no_formats_error') or self.get_param('wait_for_video')):
             self.report_warning(msg, video_id)
         elif isinstance(msg, ExtractorError):
             raise msg
@@ -1445,11 +1452,19 @@ def extract_video_object(e):
             })
         extract_interaction_statistic(e)

-        for e in json_ld:
-            if '@context' in e:
+        def traverse_json_ld(json_ld, at_top_level=True):
+            for e in json_ld:
+                if at_top_level and '@context' not in e:
+                    continue
+                if at_top_level and set(e.keys()) == {'@context', '@graph'}:
+                    traverse_json_ld(variadic(e['@graph'], allowed_types=(dict,)), at_top_level=False)
+                    break
                 item_type = e.get('@type')
                 if expected_type is not None and expected_type != item_type:
                     continue
+                rating = traverse_obj(e, ('aggregateRating', 'ratingValue'), expected_type=float_or_none)
+                if rating is not None:
+                    info['average_rating'] = rating
                 if item_type in ('TVEpisode', 'Episode'):
                     episode_name = unescapeHTML(e.get('name'))
                     info.update({
@@ -1479,7 +1494,7 @@ def extract_video_object(e):
                     info.update({
                         'timestamp': parse_iso8601(e.get('datePublished')),
                         'title': unescapeHTML(e.get('headline')),
-                        'description': unescapeHTML(e.get('articleBody')),
+                        'description': unescapeHTML(e.get('articleBody') or e.get('description')),
                     })
                 elif item_type == 'VideoObject':
                     extract_video_object(e)
@@ -1494,6 +1509,8 @@ def extract_video_object(e):
                     continue
                 else:
                     break
+        traverse_json_ld(json_ld)
+
         return dict((k, v) for k, v in info.items() if v is not None)
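A standalone sketch of the `@graph` handling introduced above: a top-level JSON-LD entry consisting solely of `@context` plus `@graph` is unwrapped and traversed one level down (the JSON-LD document below is a made-up example):

json_ld = [{'@context': 'https://schema.org',
            '@graph': [{'@type': 'VideoObject', 'name': 'Demo clip'}]}]

def iter_json_ld(entries, at_top_level=True):
    for e in entries:
        if at_top_level and '@context' not in e:
            continue
        if at_top_level and set(e.keys()) == {'@context', '@graph'}:
            yield from iter_json_ld(e['@graph'], at_top_level=False)
            break
        yield e

print([e['name'] for e in iter_json_ld(json_ld)])  # ['Demo clip']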

     def _search_nextjs_data(self, webpage, video_id, **kw):
@@ -1503,6 +1520,24 @@ def _search_nextjs_data(self, webpage, video_id, **kw):
             webpage, 'next.js data', **kw),
             video_id, **kw)

+    def _search_nuxt_data(self, webpage, video_id, context_name='__NUXT__'):
+        ''' Parses Nuxt.js metadata. This works as long as the function __NUXT__ invokes is a pure function. '''
+        # not all websites do this, but it can be changed
+        # https://stackoverflow.com/questions/67463109/how-to-change-or-hide-nuxt-and-nuxt-keyword-in-page-source
+        rectx = re.escape(context_name)
+        js, arg_keys, arg_vals = self._search_regex(
+            (r'<script>window\.%s=\(function\((?P<arg_keys>.*?)\)\{return\s(?P<js>\{.*?\})\}\((?P<arg_vals>.+?)\)\);?</script>' % rectx,
+             r'%s\(.*?\(function\((?P<arg_keys>.*?)\)\{return\s(?P<js>\{.*?\})\}\((?P<arg_vals>.*?)\)' % rectx),
+            webpage, context_name, group=['js', 'arg_keys', 'arg_vals'])
+
+        args = dict(zip(arg_keys.split(','), arg_vals.split(',')))
+
+        for key, val in args.items():
+            if val in ('undefined', 'void 0'):
+                args[key] = 'null'
+
+        return self._parse_json(js_to_json(js, args), video_id)['data'][0]
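An end-to-end sketch of the idea behind `_search_nuxt_data` on a made-up page: the IIFE's parameter names are zipped against its literal call arguments, then substituted back into the returned object literal. The crude `re.sub` below is only a stand-in for what `js_to_json` does with its second argument upstream:

import json
import re

webpage = '<script>window.__NUXT__=(function(a,b){return {"data":[{"title":a,"views":b}]}}("Demo",42));</script>'
m = re.search(
    r'<script>window\.__NUXT__=\(function\((?P<arg_keys>.*?)\)\{return\s(?P<js>\{.*?\})\}\((?P<arg_vals>.+?)\)\);?</script>',
    webpage)
args = dict(zip(m.group('arg_keys').split(','), m.group('arg_vals').split(',')))
js = m.group('js')
for key, val in args.items():
    # replace each bare argument name that appears as a JSON value
    js = re.sub(rf'(?<=[:,\[]){re.escape(key)}(?=[,\}}\]])', val, js)
print(json.loads(js)['data'][0]['title'])  # Demo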

     @staticmethod
     def _hidden_inputs(html):
         html = re.sub(r'<!--(?:(?!<!--).)*-->', '', html)
@@ -1530,20 +1565,20 @@ class FormatSort:

         default = ('hidden', 'aud_or_vid', 'hasvid', 'ie_pref', 'lang', 'quality',
                    'res', 'fps', 'hdr:12', 'codec:vp9.2', 'size', 'br', 'asr',
-                   'proto', 'ext', 'hasaud', 'source', 'format_id')  # These must not be aliases
+                   'proto', 'ext', 'hasaud', 'source', 'id')  # These must not be aliases
         ytdl_default = ('hasaud', 'lang', 'quality', 'tbr', 'filesize', 'vbr',
                         'height', 'width', 'proto', 'vext', 'abr', 'aext',
-                        'fps', 'fs_approx', 'source', 'format_id')
+                        'fps', 'fs_approx', 'source', 'id')

         settings = {
             'vcodec': {'type': 'ordered', 'regex': True,
                        'order': ['av0?1', 'vp0?9.2', 'vp0?9', '[hx]265|he?vc?', '[hx]264|avc', 'vp0?8', 'mp4v|h263', 'theora', '', None, 'none']},
             'acodec': {'type': 'ordered', 'regex': True,
-                       'order': ['opus', 'vorbis', 'aac', 'mp?4a?', 'mp3', 'e-?a?c-?3', 'ac-?3', 'dts', '', None, 'none']},
+                       'order': ['[af]lac', 'wav|aiff', 'opus', 'vorbis', 'aac', 'mp?4a?', 'mp3', 'e-?a?c-?3', 'ac-?3', 'dts', '', None, 'none']},
             'hdr': {'type': 'ordered', 'regex': True, 'field': 'dynamic_range',
                     'order': ['dv', '(hdr)?12', r'(hdr)?10\+', '(hdr)?10', 'hlg', '', 'sdr', None]},
             'proto': {'type': 'ordered', 'regex': True, 'field': 'protocol',
-                      'order': ['(ht|f)tps', '(ht|f)tp$', 'm3u8.+', '.*dash', 'ws|websocket', '', 'mms|rtsp', 'none', 'f4']},
+                      'order': ['(ht|f)tps', '(ht|f)tp$', 'm3u8.*', '.*dash', 'websocket_frag', 'rtmpe?', '', 'mms|rtsp', 'ws|websocket', 'f4']},
             'vext': {'type': 'ordered', 'field': 'video_ext',
                      'order': ('mp4', 'webm', 'flv', '', 'none'),
                      'order_free': ('webm', 'mp4', 'flv', '', 'none')},
@@ -1578,7 +1613,12 @@ class FormatSort:
             'res': {'type': 'multiple', 'field': ('height', 'width'),
                     'function': lambda it: (lambda l: min(l) if l else 0)(tuple(filter(None, it)))},

-            # Most of these exist only for compatibility reasons
+            # For compatibility with youtube-dl
+            'format_id': {'type': 'alias', 'field': 'id'},
+            'preference': {'type': 'alias', 'field': 'ie_pref'},
+            'language_preference': {'type': 'alias', 'field': 'lang'},
+
+            # Deprecated
             'dimension': {'type': 'alias', 'field': 'res'},
             'resolution': {'type': 'alias', 'field': 'res'},
             'extension': {'type': 'alias', 'field': 'ext'},
@@ -1587,7 +1627,6 @@ class FormatSort:
             'video_bitrate': {'type': 'alias', 'field': 'vbr'},
             'audio_bitrate': {'type': 'alias', 'field': 'abr'},
             'framerate': {'type': 'alias', 'field': 'fps'},
-            'language_preference': {'type': 'alias', 'field': 'lang'},  # not named as 'language' because such a field exists
             'protocol': {'type': 'alias', 'field': 'proto'},
             'source_preference': {'type': 'alias', 'field': 'source'},
             'filesize_approx': {'type': 'alias', 'field': 'fs_approx'},
@@ -1602,15 +1641,23 @@ class FormatSort:
             'audio': {'type': 'alias', 'field': 'hasaud'},
             'has_audio': {'type': 'alias', 'field': 'hasaud'},
             'extractor': {'type': 'alias', 'field': 'ie_pref'},
-            'preference': {'type': 'alias', 'field': 'ie_pref'},
             'extractor_preference': {'type': 'alias', 'field': 'ie_pref'},
-            'format_id': {'type': 'alias', 'field': 'id'},
         }

-        _order = []
+        def __init__(self, ie, field_preference):
+            self._order = []
+            self.ydl = ie._downloader
+            self.evaluate_params(self.ydl.params, field_preference)
+            if ie.get_param('verbose'):
+                self.print_verbose_info(self.ydl.write_debug)

         def _get_field_setting(self, field, key):
             if field not in self.settings:
                 if key in ('forced', 'priority'):
                     return False
+                self.ydl.deprecation_warning(
+                    f'Using arbitrary fields ({field}) for format sorting is deprecated '
+                    'and may be removed in a future version')
                 self.settings[field] = {}
             propObj = self.settings[field]
             if key not in propObj:
@@ -1693,7 +1740,11 @@ def add_item(field, reverse, closest, limit_text):
             if field is None:
                 continue
             if self._get_field_setting(field, 'type') == 'alias':
-                field = self._get_field_setting(field, 'field')
+                alias, field = field, self._get_field_setting(field, 'field')
+                if alias not in ('format_id', 'preference', 'language_preference'):
+                    self.ydl.deprecation_warning(
+                        f'Format sorting alias {alias} is deprecated '
+                        f'and may be removed in a future version. Please use {field} instead')
             reverse = match.group('reverse') is not None
             closest = match.group('separator') == '~'
             limit_text = match.group('limit')
@@ -1797,10 +1848,7 @@ def calculate_preference(self, format):
     def _sort_formats(self, formats, field_preference=[]):
         if not formats:
             return
-        format_sort = self.FormatSort()  # params and to_screen are taken from the downloader
-        format_sort.evaluate_params(self._downloader.params, field_preference)
-        if self.get_param('verbose', False):
-            format_sort.print_verbose_info(self._downloader.write_debug)
+        format_sort = self.FormatSort(self, field_preference)
         formats.sort(key=lambda f: format_sort.calculate_preference(f))
|
||||
|
||||
def _check_formats(self, formats, video_id):
@@ -2034,10 +2082,10 @@ def _parse_m3u8_formats_and_subtitles(
video_id=None):
formats, subtitles = [], {}

if '#EXT-X-FAXS-CM:' in m3u8_doc: # Adobe Flash Access
return formats, subtitles

has_drm = re.search(r'#EXT-X-(?:SESSION-)?KEY:.*?URI="skd://', m3u8_doc)
has_drm = re.search('|'.join([
r'#EXT-X-FAXS-CM:', # Adobe Flash Access
r'#EXT-X-(?:SESSION-)?KEY:.*?URI="skd://', # Apple FairPlay
]), m3u8_doc)
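The rewritten check above folds the old substring test for Adobe Flash Access and the FairPlay key regex into a single search. Roughly, and assuming the same two patterns:

    import re

    DRM_PATTERNS = [
        r'#EXT-X-FAXS-CM:',  # Adobe Flash Access
        r'#EXT-X-(?:SESSION-)?KEY:.*?URI="skd://',  # Apple FairPlay
    ]

    def manifest_has_drm(m3u8_doc):
        # One pass over the manifest text instead of two separate probes
        return re.search('|'.join(DRM_PATTERNS), m3u8_doc) is not None

    sample = '#EXTM3U\n#EXT-X-SESSION-KEY:METHOD=SAMPLE-AES,URI="skd://key"\n'
    assert manifest_has_drm(sample)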
def format_url(url):
return url if re.match(r'^https?://', url) else compat_urlparse.urljoin(m3u8_url, url)
@@ -2284,7 +2332,7 @@ def _extract_smil_formats_and_subtitles(self, smil_url, video_id, fatal=True, f4

if smil is False:
assert not fatal
return []
return [], {}

namespace = self._parse_smil_namespace(smil)

@@ -2648,7 +2696,7 @@ def extract_Initialization(source):

mpd_duration = parse_duration(mpd_doc.get('mediaPresentationDuration'))
formats, subtitles = [], {}
stream_numbers = {'audio': 0, 'video': 0}
stream_numbers = collections.defaultdict(int)
for period in mpd_doc.findall(_add_ns('Period')):
period_duration = parse_duration(period.get('duration')) or mpd_duration
period_ms_info = extract_multisegment_info(period, {
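Switching stream_numbers from a fixed {'audio': 0, 'video': 0} dict to collections.defaultdict(int) means any key, not just the two hard-coded content types, gets a counter that starts at 0 on first use. A small sketch of the counting pattern:

    import collections

    stream_numbers = collections.defaultdict(int)
    for content_type in ('video', 'audio', 'video', 'image/jpeg'):
        manifest_stream_number = stream_numbers[content_type]  # 0 on first sight
        stream_numbers[content_type] += 1
    assert dict(stream_numbers) == {'video': 2, 'audio': 1, 'image/jpeg': 1}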
@@ -2714,10 +2762,8 @@ def extract_Initialization(source):
'format_note': 'DASH %s' % content_type,
'filesize': filesize,
'container': mimetype2ext(mime_type) + '_dash',
'manifest_stream_number': stream_numbers[content_type]
}
f.update(parse_codecs(codecs))
stream_numbers[content_type] += 1
elif content_type == 'text':
f = {
'ext': mimetype2ext(mime_type),
@@ -2884,7 +2930,9 @@ def add_segment_url():
else:
# Assuming direct URL to unfragmented media.
f['url'] = base_url
if content_type in ('video', 'audio') or mime_type == 'image/jpeg':
if content_type in ('video', 'audio', 'image/jpeg'):
f['manifest_stream_number'] = stream_numbers[f['url']]
stream_numbers[f['url']] += 1
formats.append(f)
elif content_type == 'text':
subtitles.setdefault(lang or 'und', []).append(f)
@@ -3415,10 +3463,8 @@ def _parse_jwplayer_formats(self, jwplayer_sources_data, video_id=None,
return formats

def _live_title(self, name):
""" Generate the title for a live video """
now = datetime.datetime.now()
now_str = now.strftime('%Y-%m-%d %H:%M')
return name + ' ' + now_str
self._downloader.deprecation_warning('yt_dlp.InfoExtractor._live_title is deprecated and does not work as expected')
return name

def _int(self, v, name, fatal=False, **kwargs):
res = int_or_none(v, **kwargs)
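The gutted _live_title above used to append a timestamp to live titles; it now only emits a deprecation warning and returns the name unchanged. A rough before/after sketch (not the exact code):

    import datetime

    def live_title_old(name):
        # Historical behaviour: suffix the current time onto the title
        return name + ' ' + datetime.datetime.now().strftime('%Y-%m-%d %H:%M')

    def live_title_new(name):
        # Current behaviour: deprecation warning (omitted here), title unchanged
        return name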
@@ -3528,14 +3574,18 @@ def extract_comments(self, *args, **kwargs):

def extractor():
comments = []
interrupted = True
try:
while True:
comments.append(next(generator))
except KeyboardInterrupt:
interrupted = True
self.to_screen('Interrupted by user')
except StopIteration:
interrupted = False
except KeyboardInterrupt:
self.to_screen('Interrupted by user')
except Exception as e:
if self.get_param('ignoreerrors') is not True:
raise
self._downloader.report_error(e)
comment_count = len(comments)
self.to_screen(f'Extracted {comment_count} comments')
return {
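The reordering above matters: StopIteration must be handled before the broader handlers so that a fully drained generator flips interrupted to False, while Ctrl-C leaves it True. A condensed, self-contained sketch of that pattern:

    def drain(generator, to_screen=print):
        comments, interrupted = [], True
        try:
            while True:
                comments.append(next(generator))
        except StopIteration:
            interrupted = False  # generator finished cleanly
        except KeyboardInterrupt:
            to_screen('Interrupted by user')  # partial result, interrupted stays True
        return comments, interrupted

    comments, interrupted = drain(iter(['a', 'b']))
    assert comments == ['a', 'b'] and not interrupted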
@@ -3613,7 +3663,7 @@ def _availability(is_private=None, needs_premium=None, needs_subscription=None,
else 'public' if all_known
else None)

def _configuration_arg(self, key, default=NO_DEFAULT, casesense=False):
def _configuration_arg(self, key, default=NO_DEFAULT, *, ie_key=None, casesense=False):
'''
@returns A list of values for the extractor argument given by "key"
or "default" if no such key is present
@@ -3621,7 +3671,7 @@ def _configuration_arg(self, key, default=NO_DEFAULT, casesense=False):
@param casesense When false, the values are converted to lower case
'''
val = traverse_obj(
self._downloader.params, ('extractor_args', self.ie_key().lower(), key))
self._downloader.params, ('extractor_args', (ie_key or self.ie_key()).lower(), key))
if val is None:
return [] if default is NO_DEFAULT else default
return list(val) if casesense else [x.lower() for x in val]
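The new keyword-only ie_key parameter lets one extractor read arguments registered under another extractor's namespace. A hedged sketch of the lookup, with a plain dict standing in for the downloader params and traverse_obj:

    def configuration_arg(params, own_ie_key, key, *, ie_key=None, casesense=False, default=None):
        # extractor_args are namespaced by the (possibly overridden) key, lower-cased
        val = ((params.get('extractor_args') or {})
               .get((ie_key or own_ie_key).lower(), {})
               .get(key))
        if val is None:
            return [] if default is None else default
        return list(val) if casesense else [x.lower() for x in val]

    params = {'extractor_args': {'youtube': {'player_client': ['WEB']}}}
    assert configuration_arg(params, 'YoutubeTab', 'player_client', ie_key='youtube') == ['web']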
@@ -3640,17 +3690,8 @@ class SearchInfoExtractor(InfoExtractor):
def _make_valid_url(cls):
return r'%s(?P<prefix>|[1-9][0-9]*|all):(?P<query>[\s\S]+)' % cls._SEARCH_KEY

@classmethod
def suitable(cls, url):
return re.match(cls._make_valid_url(), url) is not None

def _real_extract(self, query):
mobj = re.match(self._make_valid_url(), query)
if mobj is None:
raise ExtractorError('Invalid search query "%s"' % query)

prefix = mobj.group('prefix')
query = mobj.group('query')
prefix, query = self._match_valid_url(query).group('prefix', 'query')
if prefix == '':
return self._get_n_results(query, 1)
elif prefix == 'all':
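_real_extract now leans on _match_valid_url instead of re-matching by hand; the query grammar is unchanged: an empty prefix means one result, a positive integer means that many, and 'all' means everything. For example, with a hypothetical _SEARCH_KEY of 'ytsearch':

    import re

    pattern = r'ytsearch(?P<prefix>|[1-9][0-9]*|all):(?P<query>[\s\S]+)'
    m = re.match(pattern, 'ytsearch5:cute cats')
    assert m.group('prefix') == '5' and m.group('query') == 'cute cats'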
40 yt_dlp/extractor/cozytv.py Normal file
@@ -0,0 +1,40 @@
# coding: utf-8
from __future__ import unicode_literals

from .common import InfoExtractor
from ..utils import unified_strdate


class CozyTVIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?cozy\.tv/(?P<uploader>[^/]+)/replays/(?P<id>[^/$#&?]+)'

_TESTS = [{
'url': 'https://cozy.tv/beardson/replays/2021-11-19_1',
'info_dict': {
'id': 'beardson-2021-11-19_1',
'ext': 'mp4',
'title': 'pokemon pt2',
'uploader': 'beardson',
'upload_date': '20211119',
'was_live': True,
'duration': 7981,
},
'params': {'skip_download': True}
}]

def _real_extract(self, url):
uploader, date = self._match_valid_url(url).groups()
id = f'{uploader}-{date}'
data_json = self._download_json(f'https://api.cozy.tv/cache/{uploader}/replay/{date}', id)
formats, subtitles = self._extract_m3u8_formats_and_subtitles(
f'https://cozycdn.foxtrotstream.xyz/replays/{uploader}/{date}/index.m3u8', id, ext='mp4')
return {
'id': id,
'title': data_json.get('title'),
'uploader': data_json.get('user') or uploader,
'upload_date': unified_strdate(data_json.get('date')),
'was_live': True,
'duration': data_json.get('duration'),
'formats': formats,
'subtitles': subtitles,
}
@@ -23,32 +23,35 @@
class CrackleIE(InfoExtractor):
_VALID_URL = r'(?:crackle:|https?://(?:(?:www|m)\.)?(?:sony)?crackle\.com/(?:playlist/\d+/|(?:[^/]+/)+))(?P<id>\d+)'
_TESTS = [{
# geo restricted to CA
'url': 'https://www.crackle.com/andromeda/2502343',
# Crackle is available in the United States and territories
'url': 'https://www.crackle.com/thanksgiving/2510064',
'info_dict': {
'id': '2502343',
'id': '2510064',
'ext': 'mp4',
'title': 'Under The Night',
'description': 'md5:d2b8ca816579ae8a7bf28bfff8cefc8a',
'duration': 2583,
'title': 'Touch Football',
'description': 'md5:cfbb513cf5de41e8b56d7ab756cff4df',
'duration': 1398,
'view_count': int,
'average_rating': 0,
'age_limit': 14,
'genre': 'Action, Sci-Fi',
'creator': 'Allan Kroeker',
'artist': 'Keith Hamilton Cobb, Kevin Sorbo, Lisa Ryder, Lexa Doig, Robert Hewitt Wolfe',
'release_year': 2000,
'series': 'Andromeda',
'episode': 'Under The Night',
'age_limit': 17,
'genre': 'Comedy',
'creator': 'Daniel Powell',
'artist': 'Chris Elliott, Amy Sedaris',
'release_year': 2016,
'series': 'Thanksgiving',
'episode': 'Touch Football',
'season_number': 1,
'episode_number': 1,
},
'params': {
# m3u8 download
'skip_download': True,
}
},
'expected_warnings': [
'Trying with a list of known countries'
],
}, {
'url': 'https://www.sonycrackle.com/andromeda/2502343',
'url': 'https://www.sonycrackle.com/thanksgiving/2510064',
'only_matching': True,
}]

@@ -129,7 +132,6 @@ def _real_extract(self, url):
break

ignore_no_formats = self.get_param('ignore_no_formats_error')
allow_unplayable_formats = self.get_param('allow_unplayable_formats')

if not media or (not media.get('MediaURLs') and not ignore_no_formats):
raise ExtractorError(
@@ -143,9 +145,9 @@ def _real_extract(self, url):
for e in media.get('MediaURLs') or []:
if e.get('UseDRM'):
has_drm = True
if not allow_unplayable_formats:
continue
format_url = url_or_none(e.get('Path'))
format_url = url_or_none(e.get('DRMPath'))
else:
format_url = url_or_none(e.get('Path'))
if not format_url:
continue
ext = determine_ext(format_url)
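With the change above, DRM entries are no longer silently given the clear Path: they are skipped unless unplayable formats are allowed, and then use DRMPath. A small sketch of the selection rule:

    def pick_format_url(entry, allow_unplayable_formats):
        if entry.get('UseDRM'):
            if not allow_unplayable_formats:
                return None  # skip DRM-protected entries by default
            return entry.get('DRMPath')
        return entry.get('Path')

    assert pick_format_url({'UseDRM': True, 'DRMPath': 'drm.m3u8', 'Path': 'clear.m3u8'}, False) is None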
@@ -18,7 +18,7 @@
str_to_int,
unescapeHTML,
)
from .senateisvp import SenateISVPIE
from .senategov import SenateISVPIE
from .ustream import UstreamIE
@@ -15,7 +15,6 @@
class CuriosityStreamBaseIE(InfoExtractor):
_NETRC_MACHINE = 'curiositystream'
_auth_token = None
_API_BASE_URL = 'https://api.curiositystream.com/v1/'

def _handle_errors(self, result):
error = result.get('error', {}).get('message')
@@ -39,37 +38,44 @@ def _real_initialize(self):
if email is None:
return
result = self._download_json(
self._API_BASE_URL + 'login', None, data=urlencode_postdata({
'https://api.curiositystream.com/v1/login', None,
note='Logging in', data=urlencode_postdata({
'email': email,
'password': password,
}))
self._handle_errors(result)
self._auth_token = result['message']['auth_token']
CuriosityStreamBaseIE._auth_token = result['message']['auth_token']


class CuriosityStreamIE(CuriosityStreamBaseIE):
IE_NAME = 'curiositystream'
_VALID_URL = r'https?://(?:app\.)?curiositystream\.com/video/(?P<id>\d+)'
_TEST = {
_TESTS = [{
'url': 'https://app.curiositystream.com/video/2',
'info_dict': {
'id': '2',
'ext': 'mp4',
'title': 'How Did You Develop The Internet?',
'description': 'Vint Cerf, Google\'s Chief Internet Evangelist, describes how he and Bob Kahn created the internet.',
'channel': 'Curiosity Stream',
'categories': ['Technology', 'Interview'],
'average_rating': 96.79,
'series_id': '2',
},
'params': {
# m3u8 download
'skip_download': True,
},
}
}]

_API_BASE_URL = 'https://api.curiositystream.com/v1/media/'

def _real_extract(self, url):
video_id = self._match_id(url)

formats = []
for encoding_format in ('m3u8', 'mpd'):
media = self._call_api('media/' + video_id, video_id, query={
media = self._call_api(video_id, video_id, query={
'encodingsNew': 'true',
'encodingsFormat': encoding_format,
})
@@ -139,12 +145,33 @@ def _real_extract(self, url):
'duration': int_or_none(media.get('duration')),
'tags': media.get('tags'),
'subtitles': subtitles,
'channel': media.get('producer'),
'categories': [media.get('primary_category'), media.get('type')],
'average_rating': media.get('rating_percentage'),
'series_id': str(media.get('collection_id') or '') or None,
}


class CuriosityStreamCollectionIE(CuriosityStreamBaseIE):
IE_NAME = 'curiositystream:collection'
_VALID_URL = r'https?://(?:app\.)?curiositystream\.com/(?:collections?|series)/(?P<id>\d+)'
class CuriosityStreamCollectionBaseIE(CuriosityStreamBaseIE):

def _real_extract(self, url):
collection_id = self._match_id(url)
collection = self._call_api(collection_id, collection_id)
entries = []
for media in collection.get('media', []):
media_id = compat_str(media.get('id'))
media_type, ie = ('series', CuriosityStreamSeriesIE) if media.get('is_collection') else ('video', CuriosityStreamIE)
entries.append(self.url_result(
'https://curiositystream.com/%s/%s' % (media_type, media_id),
ie=ie.ie_key(), video_id=media_id))
return self.playlist_result(
entries, collection_id,
collection.get('title'), collection.get('description'))


class CuriosityStreamCollectionsIE(CuriosityStreamCollectionBaseIE):
IE_NAME = 'curiositystream:collections'
_VALID_URL = r'https?://(?:app\.)?curiositystream\.com/collections/(?P<id>\d+)'
_API_BASE_URL = 'https://api.curiositystream.com/v2/collections/'
_TESTS = [{
'url': 'https://curiositystream.com/collections/86',
@@ -155,7 +182,17 @@ class CuriosityStreamCollectionIE(CuriosityStreamBaseIE):
},
'playlist_mincount': 7,
}, {
'url': 'https://app.curiositystream.com/collection/2',
'url': 'https://curiositystream.com/collections/36',
'only_matching': True,
}]


class CuriosityStreamSeriesIE(CuriosityStreamCollectionBaseIE):
IE_NAME = 'curiositystream:series'
_VALID_URL = r'https?://(?:app\.)?curiositystream\.com/(?:series|collection)/(?P<id>\d+)'
_API_BASE_URL = 'https://api.curiositystream.com/v2/series/'
_TESTS = [{
'url': 'https://curiositystream.com/series/2',
'info_dict': {
'id': '2',
'title': 'Curious Minds: The Internet',
@@ -163,23 +200,6 @@ class CuriosityStreamCollectionIE(CuriosityStreamBaseIE):
},
'playlist_mincount': 16,
}, {
'url': 'https://curiositystream.com/series/2',
'only_matching': True,
}, {
'url': 'https://curiositystream.com/collections/36',
'url': 'https://curiositystream.com/collection/2',
'only_matching': True,
}]

def _real_extract(self, url):
collection_id = self._match_id(url)
collection = self._call_api(collection_id, collection_id)
entries = []
for media in collection.get('media', []):
media_id = compat_str(media.get('id'))
media_type, ie = ('series', CuriosityStreamCollectionIE) if media.get('is_collection') else ('video', CuriosityStreamIE)
entries.append(self.url_result(
'https://curiositystream.com/%s/%s' % (media_type, media_id),
ie=ie.ie_key(), video_id=media_id))
return self.playlist_result(
entries, collection_id,
collection.get('title'), collection.get('description'))
@@ -305,7 +305,7 @@ def _real_extract(self, url):

return {
'id': video_id,
'title': self._live_title(title) if is_live else title,
'title': title,
'description': clean_html(media.get('description')),
'thumbnails': thumbnails,
'duration': int_or_none(metadata.get('duration')) or None,
@@ -1,41 +0,0 @@
# coding: utf-8
from __future__ import unicode_literals


from .dplay import DPlayIE


class DiscoveryNetworksDeIE(DPlayIE):
_VALID_URL = r'https?://(?:www\.)?(?P<domain>(?:tlc|dmax)\.de|dplay\.co\.uk)/(?:programme|show|sendungen)/(?P<programme>[^/]+)/(?:video/)?(?P<alternate_id>[^/]+)'

_TESTS = [{
'url': 'https://www.tlc.de/programme/breaking-amish/video/die-welt-da-drauen/DCB331270001100',
'info_dict': {
'id': '78867',
'ext': 'mp4',
'title': 'Die Welt da draußen',
'description': 'md5:61033c12b73286e409d99a41742ef608',
'timestamp': 1554069600,
'upload_date': '20190331',
},
'params': {
'skip_download': True,
},
}, {
'url': 'https://www.dmax.de/programme/dmax-highlights/video/tuning-star-sidney-hoffmann-exklusiv-bei-dmax/191023082312316',
'only_matching': True,
}, {
'url': 'https://www.dplay.co.uk/show/ghost-adventures/video/hotel-leger-103620/EHD_280313B',
'only_matching': True,
}, {
'url': 'https://tlc.de/sendungen/breaking-amish/die-welt-da-drauen/',
'only_matching': True,
}]

def _real_extract(self, url):
domain, programme, alternate_id = self._match_valid_url(url).groups()
country = 'GB' if domain == 'dplay.co.uk' else 'DE'
realm = 'questuk' if country == 'GB' else domain.replace('.', '')
return self._get_disco_api_info(
url, '%s/%s' % (programme, alternate_id),
'sonic-eu1-prod.disco-api.com', realm, country)
@@ -1,97 +0,0 @@
# coding: utf-8
from __future__ import unicode_literals

import json

from ..compat import compat_str
from ..utils import try_get
from .common import InfoExtractor
from .dplay import DPlayIE


class DiscoveryPlusIndiaIE(DPlayIE):
_VALID_URL = r'https?://(?:www\.)?discoveryplus\.in/videos?' + DPlayIE._PATH_REGEX
_TESTS = [{
'url': 'https://www.discoveryplus.in/videos/how-do-they-do-it/fugu-and-more?seasonId=8&type=EPISODE',
'info_dict': {
'id': '27104',
'ext': 'mp4',
'display_id': 'how-do-they-do-it/fugu-and-more',
'title': 'Fugu and More',
'description': 'The Japanese catch, prepare and eat the deadliest fish on the planet.',
'duration': 1319,
'timestamp': 1582309800,
'upload_date': '20200221',
'series': 'How Do They Do It?',
'season_number': 8,
'episode_number': 2,
'creator': 'Discovery Channel',
},
'params': {
'skip_download': True,
},
'skip': 'Cookies (not necessarily logged in) are needed'
}]

def _update_disco_api_headers(self, headers, disco_base, display_id, realm):
headers['x-disco-params'] = 'realm=%s' % realm
headers['x-disco-client'] = 'WEB:UNKNOWN:dplus-india:17.0.0'

def _download_video_playback_info(self, disco_base, video_id, headers):
return self._download_json(
disco_base + 'playback/v3/videoPlaybackInfo',
video_id, headers=headers, data=json.dumps({
'deviceInfo': {
'adBlocker': False,
},
'videoId': video_id,
}).encode('utf-8'))['data']['attributes']['streaming']

def _real_extract(self, url):
display_id = self._match_id(url)
return self._get_disco_api_info(
url, display_id, 'ap2-prod-direct.discoveryplus.in', 'dplusindia', 'in')


class DiscoveryPlusIndiaShowIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?discoveryplus\.in/show/(?P<show_name>[^/]+)/?(?:[?#]|$)'
_TESTS = [{
'url': 'https://www.discoveryplus.in/show/how-do-they-do-it',
'playlist_mincount': 140,
'info_dict': {
'id': 'how-do-they-do-it',
},
}]

def _entries(self, show_name):
headers = {
'x-disco-client': 'WEB:UNKNOWN:dplus-india:prod',
'x-disco-params': 'realm=dplusindia',
'referer': 'https://www.discoveryplus.in/',
}
show_url = 'https://ap2-prod-direct.discoveryplus.in/cms/routes/show/{}?include=default'.format(show_name)
show_json = self._download_json(show_url,
video_id=show_name,
headers=headers)['included'][4]['attributes']['component']
show_id = show_json['mandatoryParams'].split('=')[-1]
season_url = 'https://ap2-prod-direct.discoveryplus.in/content/videos?sort=episodeNumber&filter[seasonNumber]={}&filter[show.id]={}&page[size]=100&page[number]={}'
for season in show_json['filters'][0]['options']:
season_id = season['id']
total_pages, page_num = 1, 0
while page_num < total_pages:
season_json = self._download_json(season_url.format(season_id, show_id, compat_str(page_num + 1)),
video_id=show_id, headers=headers,
note='Downloading JSON metadata%s' % (' page %d' % page_num if page_num else ''))
if page_num == 0:
total_pages = try_get(season_json, lambda x: x['meta']['totalPages'], int) or 1
episodes_json = season_json['data']
for episode in episodes_json:
video_id = episode['attributes']['path']
yield self.url_result(
'https://discoveryplus.in/videos/%s' % video_id,
ie=DiscoveryPlusIndiaIE.ie_key(), video_id=video_id)
page_num += 1

def _real_extract(self, url):
show_name = self._match_valid_url(url).group('show_name')
return self.playlist_result(self._entries(show_name), playlist_id=show_name)
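Both this deleted show extractor and its generic replacement in dplay.py page through each season 100 episodes at a time, learning totalPages from the first response. The paging loop, condensed into a self-contained sketch where fetch_page stands in for _download_json:

    def paged_items(fetch_page):
        # fetch_page(n) -> {'data': [...], 'meta': {'totalPages': N}}
        total_pages, page_num = 1, 0
        while page_num < total_pages:
            page = fetch_page(page_num + 1)
            if page_num == 0:  # first page reveals how many pages exist
                total_pages = (page.get('meta') or {}).get('totalPages') or 1
            yield from page['data']
            page_num += 1

    pages = {1: {'data': [1, 2], 'meta': {'totalPages': 2}}, 2: {'data': [3]}}
    assert list(paged_items(lambda n: pages[n])) == [1, 2, 3]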
@@ -84,7 +84,7 @@ def _real_extract(self, url):
self._sort_formats(formats)
return {
'id': display_name,
'title': self._live_title(title),
'title': title,
'uploader': display_name,
'uploader_id': username,
'formats': formats,

@@ -105,7 +105,7 @@ def _real_extract(self, url):
'aid': 'pcclient'
})['data']['live_url']

title = self._live_title(unescapeHTML(room['room_name']))
title = unescapeHTML(room['room_name'])
description = room.get('show_details')
thumbnail = room.get('room_src')
uploader = room.get('nickname')
@@ -2,6 +2,7 @@
from __future__ import unicode_literals

import json
import uuid

from .common import InfoExtractor
from ..compat import compat_HTTPError
@@ -11,12 +12,172 @@
float_or_none,
int_or_none,
strip_or_none,
try_get,
unified_timestamp,
)


class DPlayIE(InfoExtractor):
class DPlayBaseIE(InfoExtractor):
_PATH_REGEX = r'/(?P<id>[^/]+/[^/?#]+)'
_auth_token_cache = {}

def _get_auth(self, disco_base, display_id, realm, needs_device_id=True):
key = (disco_base, realm)
st = self._get_cookies(disco_base).get('st')
token = (st and st.value) or self._auth_token_cache.get(key)

if not token:
query = {'realm': realm}
if needs_device_id:
query['deviceId'] = uuid.uuid4().hex
token = self._download_json(
disco_base + 'token', display_id, 'Downloading token',
query=query)['data']['attributes']['token']

# Save cache only if cookies are not being set
if not self._get_cookies(disco_base).get('st'):
self._auth_token_cache[key] = token

return f'Bearer {token}'

def _process_errors(self, e, geo_countries):
info = self._parse_json(e.cause.read().decode('utf-8'), None)
error = info['errors'][0]
error_code = error.get('code')
if error_code == 'access.denied.geoblocked':
self.raise_geo_restricted(countries=geo_countries)
elif error_code in ('access.denied.missingpackage', 'invalid.token'):
raise ExtractorError(
'This video is only available for registered users. You may want to use --cookies.', expected=True)
raise ExtractorError(info['errors'][0]['detail'], expected=True)

def _update_disco_api_headers(self, headers, disco_base, display_id, realm):
headers['Authorization'] = self._get_auth(disco_base, display_id, realm, False)

def _download_video_playback_info(self, disco_base, video_id, headers):
streaming = self._download_json(
disco_base + 'playback/videoPlaybackInfo/' + video_id,
video_id, headers=headers)['data']['attributes']['streaming']
streaming_list = []
for format_id, format_dict in streaming.items():
streaming_list.append({
'type': format_id,
'url': format_dict.get('url'),
})
return streaming_list

def _get_disco_api_info(self, url, display_id, disco_host, realm, country, domain=''):
geo_countries = [country.upper()]
self._initialize_geo_bypass({
'countries': geo_countries,
})
disco_base = 'https://%s/' % disco_host
headers = {
'Referer': url,
}
self._update_disco_api_headers(headers, disco_base, display_id, realm)
try:
video = self._download_json(
disco_base + 'content/videos/' + display_id, display_id,
headers=headers, query={
'fields[channel]': 'name',
'fields[image]': 'height,src,width',
'fields[show]': 'name',
'fields[tag]': 'name',
'fields[video]': 'description,episodeNumber,name,publishStart,seasonNumber,videoDuration',
'include': 'images,primaryChannel,show,tags'
})
except ExtractorError as e:
if isinstance(e.cause, compat_HTTPError) and e.cause.code == 400:
self._process_errors(e, geo_countries)
raise
video_id = video['data']['id']
info = video['data']['attributes']
title = info['name'].strip()
formats = []
subtitles = {}
try:
streaming = self._download_video_playback_info(
disco_base, video_id, headers)
except ExtractorError as e:
if isinstance(e.cause, compat_HTTPError) and e.cause.code == 403:
self._process_errors(e, geo_countries)
raise
for format_dict in streaming:
if not isinstance(format_dict, dict):
continue
format_url = format_dict.get('url')
if not format_url:
continue
format_id = format_dict.get('type')
ext = determine_ext(format_url)
if format_id == 'dash' or ext == 'mpd':
dash_fmts, dash_subs = self._extract_mpd_formats_and_subtitles(
format_url, display_id, mpd_id='dash', fatal=False)
formats.extend(dash_fmts)
subtitles = self._merge_subtitles(subtitles, dash_subs)
elif format_id == 'hls' or ext == 'm3u8':
m3u8_fmts, m3u8_subs = self._extract_m3u8_formats_and_subtitles(
format_url, display_id, 'mp4',
entry_protocol='m3u8_native', m3u8_id='hls',
fatal=False)
formats.extend(m3u8_fmts)
subtitles = self._merge_subtitles(subtitles, m3u8_subs)
else:
formats.append({
'url': format_url,
'format_id': format_id,
})
self._sort_formats(formats)

creator = series = None
tags = []
thumbnails = []
included = video.get('included') or []
if isinstance(included, list):
for e in included:
attributes = e.get('attributes')
if not attributes:
continue
e_type = e.get('type')
if e_type == 'channel':
creator = attributes.get('name')
elif e_type == 'image':
src = attributes.get('src')
if src:
thumbnails.append({
'url': src,
'width': int_or_none(attributes.get('width')),
'height': int_or_none(attributes.get('height')),
})
if e_type == 'show':
series = attributes.get('name')
elif e_type == 'tag':
name = attributes.get('name')
if name:
tags.append(name)
return {
'id': video_id,
'display_id': display_id,
'title': title,
'description': strip_or_none(info.get('description')),
'duration': float_or_none(info.get('videoDuration'), 1000),
'timestamp': unified_timestamp(info.get('publishStart')),
'series': series,
'season_number': int_or_none(info.get('seasonNumber')),
'episode_number': int_or_none(info.get('episodeNumber')),
'creator': creator,
'tags': tags,
'thumbnails': thumbnails,
'formats': formats,
'subtitles': subtitles,
'http_headers': {
'referer': domain,
},
}


class DPlayIE(DPlayBaseIE):
_VALID_URL = r'''(?x)https?://
(?P<domain>
(?:www\.)?(?P<host>d
@@ -26,7 +187,7 @@ class DPlayIE(InfoExtractor):
)
)|
(?P<subdomain_country>es|it)\.dplay\.com
)/[^/]+''' + _PATH_REGEX
)/[^/]+''' + DPlayBaseIE._PATH_REGEX

_TESTS = [{
# non geo restricted, via secure api, unsigned download hls URL
@@ -150,138 +311,6 @@ class DPlayIE(InfoExtractor):
'only_matching': True,
}]

def _process_errors(self, e, geo_countries):
info = self._parse_json(e.cause.read().decode('utf-8'), None)
error = info['errors'][0]
error_code = error.get('code')
if error_code == 'access.denied.geoblocked':
self.raise_geo_restricted(countries=geo_countries)
elif error_code in ('access.denied.missingpackage', 'invalid.token'):
raise ExtractorError(
'This video is only available for registered users. You may want to use --cookies.', expected=True)
raise ExtractorError(info['errors'][0]['detail'], expected=True)

def _update_disco_api_headers(self, headers, disco_base, display_id, realm):
headers['Authorization'] = 'Bearer ' + self._download_json(
disco_base + 'token', display_id, 'Downloading token',
query={
'realm': realm,
})['data']['attributes']['token']

def _download_video_playback_info(self, disco_base, video_id, headers):
streaming = self._download_json(
disco_base + 'playback/videoPlaybackInfo/' + video_id,
video_id, headers=headers)['data']['attributes']['streaming']
streaming_list = []
for format_id, format_dict in streaming.items():
streaming_list.append({
'type': format_id,
'url': format_dict.get('url'),
})
return streaming_list

def _get_disco_api_info(self, url, display_id, disco_host, realm, country):
geo_countries = [country.upper()]
self._initialize_geo_bypass({
'countries': geo_countries,
})
disco_base = 'https://%s/' % disco_host
headers = {
'Referer': url,
}
self._update_disco_api_headers(headers, disco_base, display_id, realm)
try:
video = self._download_json(
disco_base + 'content/videos/' + display_id, display_id,
headers=headers, query={
'fields[channel]': 'name',
'fields[image]': 'height,src,width',
'fields[show]': 'name',
'fields[tag]': 'name',
'fields[video]': 'description,episodeNumber,name,publishStart,seasonNumber,videoDuration',
'include': 'images,primaryChannel,show,tags'
})
except ExtractorError as e:
if isinstance(e.cause, compat_HTTPError) and e.cause.code == 400:
self._process_errors(e, geo_countries)
raise
video_id = video['data']['id']
info = video['data']['attributes']
title = info['name'].strip()
formats = []
try:
streaming = self._download_video_playback_info(
disco_base, video_id, headers)
except ExtractorError as e:
if isinstance(e.cause, compat_HTTPError) and e.cause.code == 403:
self._process_errors(e, geo_countries)
raise
for format_dict in streaming:
if not isinstance(format_dict, dict):
continue
format_url = format_dict.get('url')
if not format_url:
continue
format_id = format_dict.get('type')
ext = determine_ext(format_url)
if format_id == 'dash' or ext == 'mpd':
formats.extend(self._extract_mpd_formats(
format_url, display_id, mpd_id='dash', fatal=False))
elif format_id == 'hls' or ext == 'm3u8':
formats.extend(self._extract_m3u8_formats(
format_url, display_id, 'mp4',
entry_protocol='m3u8_native', m3u8_id='hls',
fatal=False))
else:
formats.append({
'url': format_url,
'format_id': format_id,
})
self._sort_formats(formats)

creator = series = None
tags = []
thumbnails = []
included = video.get('included') or []
if isinstance(included, list):
for e in included:
attributes = e.get('attributes')
if not attributes:
continue
e_type = e.get('type')
if e_type == 'channel':
creator = attributes.get('name')
elif e_type == 'image':
src = attributes.get('src')
if src:
thumbnails.append({
'url': src,
'width': int_or_none(attributes.get('width')),
'height': int_or_none(attributes.get('height')),
})
if e_type == 'show':
series = attributes.get('name')
elif e_type == 'tag':
name = attributes.get('name')
if name:
tags.append(name)

return {
'id': video_id,
'display_id': display_id,
'title': title,
'description': strip_or_none(info.get('description')),
'duration': float_or_none(info.get('videoDuration'), 1000),
'timestamp': unified_timestamp(info.get('publishStart')),
'series': series,
'season_number': int_or_none(info.get('seasonNumber')),
'episode_number': int_or_none(info.get('episodeNumber')),
'creator': creator,
'tags': tags,
'thumbnails': thumbnails,
'formats': formats,
}

def _real_extract(self, url):
mobj = self._match_valid_url(url)
display_id = mobj.group('id')
@@ -289,11 +318,11 @@ def _real_extract(self, url):
country = mobj.group('country') or mobj.group('subdomain_country') or mobj.group('plus_country')
host = 'disco-api.' + domain if domain[0] == 'd' else 'eu2-prod.disco-api.com'
return self._get_disco_api_info(
url, display_id, host, 'dplay' + country, country)
url, display_id, host, 'dplay' + country, country, domain)


class HGTVDeIE(DPlayIE):
_VALID_URL = r'https?://de\.hgtv\.com/sendungen' + DPlayIE._PATH_REGEX
class HGTVDeIE(DPlayBaseIE):
_VALID_URL = r'https?://de\.hgtv\.com/sendungen' + DPlayBaseIE._PATH_REGEX
_TESTS = [{
'url': 'https://de.hgtv.com/sendungen/tiny-house-klein-aber-oho/wer-braucht-schon-eine-toilette/',
'info_dict': {
@@ -318,8 +347,8 @@ def _real_extract(self, url):
url, display_id, 'eu1-prod.disco-api.com', 'hgtv', 'de')


class DiscoveryPlusIE(DPlayIE):
_VALID_URL = r'https?://(?:www\.)?discoveryplus\.com/(?:\w{2}/)?video' + DPlayIE._PATH_REGEX
class DiscoveryPlusIE(DPlayBaseIE):
_VALID_URL = r'https?://(?:www\.)?discoveryplus\.com/(?!it/)(?:\w{2}/)?video' + DPlayBaseIE._PATH_REGEX
_TESTS = [{
'url': 'https://www.discoveryplus.com/video/property-brothers-forever-home/food-and-family',
'info_dict': {
@@ -369,7 +398,7 @@ def _real_extract(self, url):


class ScienceChannelIE(DiscoveryPlusIE):
_VALID_URL = r'https?://(?:www\.)?sciencechannel\.com/video' + DPlayIE._PATH_REGEX
_VALID_URL = r'https?://(?:www\.)?sciencechannel\.com/video' + DPlayBaseIE._PATH_REGEX
_TESTS = [{
'url': 'https://www.sciencechannel.com/video/strangest-things-science-atve-us/nazi-mystery-machine',
'info_dict': {
@@ -389,7 +418,7 @@ class ScienceChannelIE(DiscoveryPlusIE):


class DIYNetworkIE(DiscoveryPlusIE):
_VALID_URL = r'https?://(?:watch\.)?diynetwork\.com/video' + DPlayIE._PATH_REGEX
_VALID_URL = r'https?://(?:watch\.)?diynetwork\.com/video' + DPlayBaseIE._PATH_REGEX
_TESTS = [{
'url': 'https://watch.diynetwork.com/video/pool-kings-diy-network/bringing-beach-life-to-texas',
'info_dict': {
@@ -409,7 +438,7 @@ class DIYNetworkIE(DiscoveryPlusIE):


class AnimalPlanetIE(DiscoveryPlusIE):
_VALID_URL = r'https?://(?:www\.)?animalplanet\.com/video' + DPlayIE._PATH_REGEX
_VALID_URL = r'https?://(?:www\.)?animalplanet\.com/video' + DPlayBaseIE._PATH_REGEX
_TESTS = [{
'url': 'https://www.animalplanet.com/video/north-woods-law-animal-planet/squirrel-showdown',
'info_dict': {
@@ -426,3 +455,171 @@ class AnimalPlanetIE(DiscoveryPlusIE):

_PRODUCT = 'apl'
_API_URL = 'us1-prod-direct.animalplanet.com'


class DiscoveryPlusIndiaIE(DPlayBaseIE):
_VALID_URL = r'https?://(?:www\.)?discoveryplus\.in/videos?' + DPlayBaseIE._PATH_REGEX
_TESTS = [{
'url': 'https://www.discoveryplus.in/videos/how-do-they-do-it/fugu-and-more?seasonId=8&type=EPISODE',
'info_dict': {
'id': '27104',
'ext': 'mp4',
'display_id': 'how-do-they-do-it/fugu-and-more',
'title': 'Fugu and More',
'description': 'The Japanese catch, prepare and eat the deadliest fish on the planet.',
'duration': 1319,
'timestamp': 1582309800,
'upload_date': '20200221',
'series': 'How Do They Do It?',
'season_number': 8,
'episode_number': 2,
'creator': 'Discovery Channel',
},
'params': {
'skip_download': True,
}
}]

def _update_disco_api_headers(self, headers, disco_base, display_id, realm):
headers.update({
'x-disco-params': 'realm=%s' % realm,
'x-disco-client': 'WEB:UNKNOWN:dplus-india:17.0.0',
'Authorization': self._get_auth(disco_base, display_id, realm),
})

def _download_video_playback_info(self, disco_base, video_id, headers):
return self._download_json(
disco_base + 'playback/v3/videoPlaybackInfo',
video_id, headers=headers, data=json.dumps({
'deviceInfo': {
'adBlocker': False,
},
'videoId': video_id,
}).encode('utf-8'))['data']['attributes']['streaming']

def _real_extract(self, url):
display_id = self._match_id(url)
return self._get_disco_api_info(
url, display_id, 'ap2-prod-direct.discoveryplus.in', 'dplusindia', 'in', 'https://www.discoveryplus.in/')


class DiscoveryNetworksDeIE(DPlayBaseIE):
_VALID_URL = r'https?://(?:www\.)?(?P<domain>(?:tlc|dmax)\.de|dplay\.co\.uk)/(?:programme|show|sendungen)/(?P<programme>[^/]+)/(?:video/)?(?P<alternate_id>[^/]+)'

_TESTS = [{
'url': 'https://www.tlc.de/programme/breaking-amish/video/die-welt-da-drauen/DCB331270001100',
'info_dict': {
'id': '78867',
'ext': 'mp4',
'title': 'Die Welt da draußen',
'description': 'md5:61033c12b73286e409d99a41742ef608',
'timestamp': 1554069600,
'upload_date': '20190331',
},
'params': {
'skip_download': True,
},
}, {
'url': 'https://www.dmax.de/programme/dmax-highlights/video/tuning-star-sidney-hoffmann-exklusiv-bei-dmax/191023082312316',
'only_matching': True,
}, {
'url': 'https://www.dplay.co.uk/show/ghost-adventures/video/hotel-leger-103620/EHD_280313B',
'only_matching': True,
}, {
'url': 'https://tlc.de/sendungen/breaking-amish/die-welt-da-drauen/',
'only_matching': True,
}]

def _real_extract(self, url):
domain, programme, alternate_id = self._match_valid_url(url).groups()
country = 'GB' if domain == 'dplay.co.uk' else 'DE'
realm = 'questuk' if country == 'GB' else domain.replace('.', '')
return self._get_disco_api_info(
url, '%s/%s' % (programme, alternate_id),
'sonic-eu1-prod.disco-api.com', realm, country)


class DiscoveryPlusShowBaseIE(DPlayBaseIE):

def _entries(self, show_name):
headers = {
'x-disco-client': self._X_CLIENT,
'x-disco-params': f'realm={self._REALM}',
'referer': self._DOMAIN,
'Authentication': self._get_auth(self._BASE_API, None, self._REALM),
}
show_json = self._download_json(
f'{self._BASE_API}cms/routes/{self._SHOW_STR}/{show_name}?include=default',
video_id=show_name, headers=headers)['included'][self._INDEX]['attributes']['component']
show_id = show_json['mandatoryParams'].split('=')[-1]
season_url = self._BASE_API + 'content/videos?sort=episodeNumber&filter[seasonNumber]={}&filter[show.id]={}&page[size]=100&page[number]={}'
for season in show_json['filters'][0]['options']:
season_id = season['id']
total_pages, page_num = 1, 0
while page_num < total_pages:
season_json = self._download_json(
season_url.format(season_id, show_id, str(page_num + 1)), show_name, headers=headers,
note='Downloading season %s JSON metadata%s' % (season_id, ' page %d' % page_num if page_num else ''))
if page_num == 0:
total_pages = try_get(season_json, lambda x: x['meta']['totalPages'], int) or 1
episodes_json = season_json['data']
for episode in episodes_json:
video_path = episode['attributes']['path']
yield self.url_result(
'%svideos/%s' % (self._DOMAIN, video_path),
ie=self._VIDEO_IE.ie_key(), video_id=episode.get('id') or video_path)
page_num += 1

def _real_extract(self, url):
show_name = self._match_valid_url(url).group('show_name')
return self.playlist_result(self._entries(show_name), playlist_id=show_name)


class DiscoveryPlusItalyIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?discoveryplus\.com/it/video' + DPlayBaseIE._PATH_REGEX
_TESTS = [{
'url': 'https://www.discoveryplus.com/it/video/i-signori-della-neve/stagione-2-episodio-1-i-preparativi',
'only_matching': True,
}]

def _real_extract(self, url):
video_id = self._match_id(url)
return self.url_result(f'https://discoveryplus.it/video/{video_id}', DPlayIE.ie_key(), video_id)


class DiscoveryPlusItalyShowIE(DiscoveryPlusShowBaseIE):
_VALID_URL = r'https?://(?:www\.)?discoveryplus\.it/programmi/(?P<show_name>[^/]+)/?(?:[?#]|$)'
_TESTS = [{
'url': 'https://www.discoveryplus.it/programmi/deal-with-it-stai-al-gioco',
'playlist_mincount': 168,
'info_dict': {
'id': 'deal-with-it-stai-al-gioco',
},
}]

_BASE_API = 'https://disco-api.discoveryplus.it/'
_DOMAIN = 'https://www.discoveryplus.it/'
_X_CLIENT = 'WEB:UNKNOWN:dplay-client:2.6.0'
_REALM = 'dplayit'
_SHOW_STR = 'programmi'
_INDEX = 1
_VIDEO_IE = DPlayIE


class DiscoveryPlusIndiaShowIE(DiscoveryPlusShowBaseIE):
_VALID_URL = r'https?://(?:www\.)?discoveryplus\.in/show/(?P<show_name>[^/]+)/?(?:[?#]|$)'
_TESTS = [{
'url': 'https://www.discoveryplus.in/show/how-do-they-do-it',
'playlist_mincount': 140,
'info_dict': {
'id': 'how-do-they-do-it',
},
}]

_BASE_API = 'https://ap2-prod-direct.discoveryplus.in/'
_DOMAIN = 'https://www.discoveryplus.in/'
_X_CLIENT = 'WEB:UNKNOWN:dplus-india:prod'
_REALM = 'dplusindia'
_SHOW_STR = 'show'
_INDEX = 4
_VIDEO_IE = DiscoveryPlusIndiaIE
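The new DPlayBaseIE._get_auth prefers an 'st' cookie, and otherwise fetches one anonymous bearer token per (disco_base, realm) and caches it at class level so every subclass instance shares it. A hedged sketch of that caching shape, with fetch_token standing in for the real 'token' API call:

    class TokenCache:
        _cache = {}  # shared across instances, keyed by (disco_base, realm)

        def get_auth(self, disco_base, realm, cookie_token, fetch_token):
            key = (disco_base, realm)
            token = cookie_token or self._cache.get(key)
            if not token:
                token = fetch_token(realm)
                if not cookie_token:  # only cache when cookies are not in play
                    self._cache[key] = token
            return f'Bearer {token}'

    cache = TokenCache()
    assert cache.get_auth('https://disco.example/', 'dplay', None, lambda realm: 'abc') == 'Bearer abc'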
212 yt_dlp/extractor/dropout.py Normal file
@@ -0,0 +1,212 @@
# coding: utf-8
from .common import InfoExtractor
from .vimeo import VHXEmbedIE
from ..utils import (
clean_html,
ExtractorError,
get_element_by_class,
get_element_by_id,
get_elements_by_class,
int_or_none,
join_nonempty,
unified_strdate,
urlencode_postdata,
)


class DropoutIE(InfoExtractor):
_LOGIN_URL = 'https://www.dropout.tv/login'
_NETRC_MACHINE = 'dropout'

_VALID_URL = r'https?://(?:www\.)?dropout\.tv/(?:[^/]+/)*videos/(?P<id>[^/]+)/?$'
_TESTS = [
{
'url': 'https://www.dropout.tv/game-changer/season:2/videos/yes-or-no',
'note': 'Episode in a series',
'md5': '5e000fdfd8d8fa46ff40456f1c2af04a',
'info_dict': {
'id': '738153',
'display_id': 'yes-or-no',
'ext': 'mp4',
'title': 'Yes or No',
'description': 'Ally, Brennan, and Zac are asked a simple question, but is there a correct answer?',
'release_date': '20200508',
'thumbnail': 'https://vhx.imgix.net/chuncensoredstaging/assets/351e3f24-c4a3-459a-8b79-dc80f1e5b7fd.jpg',
'series': 'Game Changer',
'season_number': 2,
'season': 'Season 2',
'episode_number': 6,
'episode': 'Yes or No',
'duration': 1180,
'uploader_id': 'user80538407',
'uploader_url': 'https://vimeo.com/user80538407',
'uploader': 'OTT Videos'
},
'expected_warnings': ['Ignoring subtitle tracks found in the HLS manifest']
},
{
'url': 'https://www.dropout.tv/dimension-20-fantasy-high/season:1/videos/episode-1',
'note': 'Episode in a series (missing release_date)',
'md5': '712caf7c191f1c47c8f1879520c2fa5c',
'info_dict': {
'id': '320562',
'display_id': 'episode-1',
'ext': 'mp4',
'title': 'The Beginning Begins',
'description': 'The cast introduces their PCs, including a neurotic elf, a goblin PI, and a corn-worshipping cleric.',
'thumbnail': 'https://vhx.imgix.net/chuncensoredstaging/assets/4421ed0d-f630-4c88-9004-5251b2b8adfa.jpg',
'series': 'Dimension 20: Fantasy High',
'season_number': 1,
'season': 'Season 1',
'episode_number': 1,
'episode': 'The Beginning Begins',
'duration': 6838,
'uploader_id': 'user80538407',
'uploader_url': 'https://vimeo.com/user80538407',
'uploader': 'OTT Videos'
},
'expected_warnings': ['Ignoring subtitle tracks found in the HLS manifest']
},
{
'url': 'https://www.dropout.tv/videos/misfits-magic-holiday-special',
'note': 'Episode not in a series',
'md5': 'c30fa18999c5880d156339f13c953a26',
'info_dict': {
'id': '1915774',
'display_id': 'misfits-magic-holiday-special',
'ext': 'mp4',
'title': 'Misfits & Magic Holiday Special',
'description': 'The magical misfits spend Christmas break at Gowpenny, with an unwelcome visitor.',
'release_date': '20211215',
'thumbnail': 'https://vhx.imgix.net/chuncensoredstaging/assets/d91ea8a6-b250-42ed-907e-b30fb1c65176-8e24b8e5.jpg',
'duration': 11698,
'uploader_id': 'user80538407',
'uploader_url': 'https://vimeo.com/user80538407',
'uploader': 'OTT Videos'
},
'expected_warnings': ['Ignoring subtitle tracks found in the HLS manifest']
}
]

def _get_authenticity_token(self, display_id):
signin_page = self._download_webpage(
self._LOGIN_URL, display_id, note='Getting authenticity token')
return self._html_search_regex(
r'name=["\']authenticity_token["\'] value=["\'](.+?)["\']',
signin_page, 'authenticity_token')

def _login(self, display_id):
username, password = self._get_login_info()
if not (username and password):
self.raise_login_required(method='password')

response = self._download_webpage(
self._LOGIN_URL, display_id, note='Logging in', data=urlencode_postdata({
'email': username,
'password': password,
'authenticity_token': self._get_authenticity_token(display_id),
'utf8': True
}))

user_has_subscription = self._search_regex(
r'user_has_subscription:\s*["\'](.+?)["\']', response, 'subscription status', default='none')
if user_has_subscription.lower() == 'true':
return response
elif user_has_subscription.lower() == 'false':
raise ExtractorError('Account is not subscribed')
else:
raise ExtractorError('Incorrect username/password')

def _real_extract(self, url):
display_id = self._match_id(url)
try:
self._login(display_id)
webpage = self._download_webpage(url, display_id, note='Downloading video webpage')
finally:
self._download_webpage('https://www.dropout.tv/logout', display_id, note='Logging out')

embed_url = self._search_regex(r'embed_url:\s*["\'](.+?)["\']', webpage, 'embed url')
thumbnail = self._og_search_thumbnail(webpage)
watch_info = get_element_by_id('watch-info', webpage) or ''

title = clean_html(get_element_by_class('video-title', watch_info))
season_episode = get_element_by_class(
'site-font-secondary-color', get_element_by_class('text', watch_info))
episode_number = int_or_none(self._search_regex(
r'Episode (\d+)', season_episode or '', 'episode', default=None))

return {
'_type': 'url_transparent',
'ie_key': VHXEmbedIE.ie_key(),
'url': embed_url,
'id': self._search_regex(r'embed.vhx.tv/videos/(.+?)\?', embed_url, 'id'),
'display_id': display_id,
'title': title,
'description': self._html_search_meta('description', webpage, fatal=False),
'thumbnail': thumbnail.split('?')[0] if thumbnail else None, # Ignore crop/downscale
'series': clean_html(get_element_by_class('series-title', watch_info)),
'episode_number': episode_number,
'episode': title if episode_number else None,
'season_number': int_or_none(self._search_regex(
r'Season (\d+),', season_episode or '', 'season', default=None)),
'release_date': unified_strdate(self._search_regex(
r'data-meta-field-name=["\']release_dates["\'] data-meta-field-value=["\'](.+?)["\']',
watch_info, 'release date', default=None)),
}


class DropoutSeasonIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?dropout\.tv/(?P<id>[^\/$&?#]+)(?:/?$|/season:[0-9]+/?$)'
_TESTS = [
{
'url': 'https://www.dropout.tv/dimension-20-fantasy-high/season:1',
'note': 'Multi-season series with the season in the url',
'playlist_count': 17,
'info_dict': {
'id': 'dimension-20-fantasy-high-season-1',
'title': 'Dimension 20 Fantasy High - Season 1'
}
},
{
'url': 'https://www.dropout.tv/dimension-20-fantasy-high',
'note': 'Multi-season series with the season not in the url',
'playlist_count': 17,
'info_dict': {
'id': 'dimension-20-fantasy-high-season-1',
'title': 'Dimension 20 Fantasy High - Season 1'
}
},
{
'url': 'https://www.dropout.tv/dimension-20-shriek-week',
'note': 'Single-season series',
'playlist_count': 4,
'info_dict': {
'id': 'dimension-20-shriek-week-season-1',
'title': 'Dimension 20 Shriek Week - Season 1'
}
}
]

def _real_extract(self, url):
season_id = self._match_id(url)
season_title = season_id.replace('-', ' ').title()
webpage = self._download_webpage(url, season_id)

entries = [
self.url_result(
url=self._search_regex(r'<a href=["\'](.+?)["\'] class=["\']browse-item-link["\']',
item, 'item_url'),
ie=DropoutIE.ie_key()
) for item in get_elements_by_class('js-collection-item', webpage)
]

seasons = (get_element_by_class('select-dropdown-wrapper', webpage) or '').strip().replace('\n', '')
current_season = self._search_regex(r'<option[^>]+selected>([^<]+)</option>',
seasons, 'current_season', default='').strip()

return {
'_type': 'playlist',
'id': join_nonempty(season_id, current_season.lower().replace(' ', '-')),
'title': join_nonempty(season_title, current_season, delim=' - '),
'entries': entries
}
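DropoutIE._login has no structured API to report success, so it scrapes a user_has_subscription marker out of the login response and maps it onto three outcomes. A condensed sketch of that classification (regex as above, outcome names illustrative):

    import re

    def classify_login(response_html):
        m = re.search(r'user_has_subscription:\s*["\'](.+?)["\']', response_html)
        state = m.group(1).lower() if m else 'none'
        if state == 'true':
            return 'ok'
        if state == 'false':
            return 'not subscribed'
        return 'bad credentials'  # marker missing: the login itself failed

    assert classify_login('user_has_subscription: "true"') == 'ok'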
@@ -321,7 +321,7 @@ def _real_extract(self, url):
channel_data = self._download_json(
'https://www.dr.dk/mu-online/api/1.0/channel/' + channel_id,
channel_id)
title = self._live_title(channel_data['Title'])
title = channel_data['Title']

formats = []
for streaming_server in channel_data.get('StreamingServers', []):
@@ -8,7 +8,7 @@


class EpiconIE(InfoExtractor):
_VALID_URL = r'(?:https?://)(?:www\.)?epicon\.in/(?:documentaries|movies|tv-shows/[^/?#]+/[^/?#]+)/(?P<id>[^/?#]+)'
_VALID_URL = r'https?://(?:www\.)?epicon\.in/(?:documentaries|movies|tv-shows/[^/?#]+/[^/?#]+)/(?P<id>[^/?#]+)'
_TESTS = [{
'url': 'https://www.epicon.in/documentaries/air-battle-of-srinagar',
'info_dict': {
@@ -84,7 +84,7 @@ def _real_extract(self, url):


class EpiconSeriesIE(InfoExtractor):
_VALID_URL = r'(?!.*season)(?:https?://)(?:www\.)?epicon\.in/tv-shows/(?P<id>[^/?#]+)'
_VALID_URL = r'(?!.*season)https?://(?:www\.)?epicon\.in/tv-shows/(?P<id>[^/?#]+)'
_TESTS = [{
'url': 'https://www.epicon.in/tv-shows/1-of-something',
'playlist_mincount': 5,
@@ -7,7 +7,9 @@
from ..compat import compat_str
from ..utils import (
determine_ext,
dict_get,
int_or_none,
unified_strdate,
unified_timestamp,
)

@@ -236,3 +238,44 @@ def _real_extract(self, url):
webpage, 'embed url')

return self.url_result(embed_url, 'AbcNewsVideo')


class ESPNCricInfoIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?espncricinfo\.com/video/[^#$&?/]+-(?P<id>\d+)'
_TESTS = [{
'url': 'https://www.espncricinfo.com/video/finch-chasing-comes-with-risks-despite-world-cup-trend-1289135',
'info_dict': {
'id': '1289135',
'ext': 'mp4',
'title': 'Finch: Chasing comes with \'risks\' despite World Cup trend',
'description': 'md5:ea32373303e25efbb146efdfc8a37829',
'upload_date': '20211113',
'duration': 96,
},
'params': {'skip_download': True}
}]

def _real_extract(self, url):
id = self._match_id(url)
data_json = self._download_json(f'https://hs-consumer-api.espncricinfo.com/v1/pages/video/video-details?videoId={id}', id)['video']
formats, subtitles = [], {}
for item in data_json.get('playbacks') or []:
if item.get('type') == 'HLS' and item.get('url'):
m3u8_frmts, m3u8_subs = self._extract_m3u8_formats_and_subtitles(item['url'], id)
formats.extend(m3u8_frmts)
subtitles = self._merge_subtitles(subtitles, m3u8_subs)
elif item.get('type') == 'AUDIO' and item.get('url'):
formats.append({
'url': item['url'],
'vcodec': 'none',
})
self._sort_formats(formats)
return {
'id': id,
'title': data_json.get('title'),
'description': data_json.get('summary'),
'upload_date': unified_strdate(dict_get(data_json, ('publishedAt', 'recordedAt'))),
'duration': data_json.get('duration'),
'formats': formats,
'subtitles': subtitles,
}
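ESPNCricInfoIE mixes HLS renditions with bare audio URLs, extending the format list and merging subtitle tracks as it goes. The accumulation pattern, sketched with extract_m3u8 standing in for _extract_m3u8_formats_and_subtitles:

    def collect_formats(playbacks, extract_m3u8):
        formats, subtitles = [], {}
        for item in playbacks or []:
            if item.get('type') == 'HLS' and item.get('url'):
                fmts, subs = extract_m3u8(item['url'])
                formats.extend(fmts)
                for lang, tracks in subs.items():  # merge per-language tracks
                    subtitles.setdefault(lang, []).extend(tracks)
            elif item.get('type') == 'AUDIO' and item.get('url'):
                formats.append({'url': item['url'], 'vcodec': 'none'})
        return formats, subtitles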
@@ -10,7 +10,7 @@


class EUScreenIE(InfoExtractor):
_VALID_URL = r'(?:https?://)(?:www\.)?euscreen\.eu/item.html\?id=(?P<id>[^&?$/]+)'
_VALID_URL = r'https?://(?:www\.)?euscreen\.eu/item.html\?id=(?P<id>[^&?$/]+)'

_TESTS = [{
'url': 'https://euscreen.eu/item.html?id=EUS_0EBCBF356BFC4E12A014023BA41BD98C',
@@ -4,6 +4,7 @@
 from .abc import (
     ABCIE,
     ABCIViewIE,
+    ABCIViewShowSeriesIE,
 )
 from .abcnews import (
     AbcNewsIE,
@@ -166,6 +167,7 @@
     BleacherReportIE,
     BleacherReportCMSIE,
 )
+from .blogger import BloggerIE
 from .bloomberg import BloombergIE
 from .bokecc import BokeCCIE
 from .bongacams import BongaCamsIE
@@ -178,6 +180,7 @@
 )
 from .bravotv import BravoTVIE
 from .breakcom import BreakIE
+from .breitbart import BreitBartIE
 from .brightcove import (
     BrightcoveLegacyIE,
     BrightcoveNewIE,
@@ -186,6 +189,7 @@
 from .buzzfeed import BuzzFeedIE
 from .byutv import BYUtvIE
 from .c56 import C56IE
+from .cableav import CableAVIE
 from .cam4 import CAM4IE
 from .camdemy import (
     CamdemyIE,
@@ -193,6 +197,7 @@
 )
 from .cammodels import CamModelsIE
 from .camwithher import CamWithHerIE
+from .canalalpha import CanalAlphaIE
 from .canalplus import CanalplusIE
 from .canalc2 import Canalc2IE
 from .canvas import (
@@ -291,6 +296,7 @@
 from .condenast import CondeNastIE
 from .contv import CONtvIE
 from .corus import CorusIE
+from .cozytv import CozyTVIE
 from .cracked import CrackedIE
 from .crackle import CrackleIE
 from .crooksandliars import CrooksAndLiarsIE
@@ -307,7 +313,8 @@
 from .cultureunplugged import CultureUnpluggedIE
 from .curiositystream import (
     CuriosityStreamIE,
-    CuriosityStreamCollectionIE,
+    CuriosityStreamCollectionsIE,
+    CuriosityStreamSeriesIE,
 )
 from .cwtv import CWTVIE
 from .dailymail import DailyMailIE
@@ -336,10 +343,6 @@
 from .dfb import DFBIE
 from .dhm import DHMIE
 from .digg import DiggIE
-from .discoveryplusindia import (
-    DiscoveryPlusIndiaIE,
-    DiscoveryPlusIndiaShowIE,
-)
 from .dotsub import DotsubIE
 from .douyutv import (
     DouyuShowIE,
@@ -351,7 +354,12 @@
     HGTVDeIE,
     ScienceChannelIE,
     DIYNetworkIE,
-    AnimalPlanetIE
+    AnimalPlanetIE,
+    DiscoveryPlusIndiaIE,
+    DiscoveryNetworksDeIE,
+    DiscoveryPlusItalyIE,
+    DiscoveryPlusItalyShowIE,
+    DiscoveryPlusIndiaShowIE,
 )
 from .dreisat import DreiSatIE
 from .drbonanza import DRBonanzaIE
@@ -373,12 +381,15 @@
     DiscoveryGoIE,
     DiscoveryGoPlaylistIE,
 )
-from .discoverynetworks import DiscoveryNetworksDeIE
 from .discoveryvr import DiscoveryVRIE
 from .disney import DisneyIE
 from .dispeak import DigitallySpeakingIE
 from .doodstream import DoodStreamIE
 from .dropbox import DropboxIE
+from .dropout import (
+    DropoutSeasonIE,
+    DropoutIE
+)
 from .dw import (
     DWIE,
     DWArticleIE,
@@ -417,6 +428,7 @@
     ESPNIE,
     ESPNArticleIE,
     FiveThirtyEightIE,
+    ESPNCricInfoIE,
 )
 from .esri import EsriVideoIE
 from .europa import EuropaIE
@@ -428,6 +440,7 @@
 from .facebook import (
     FacebookIE,
     FacebookPluginsVideoIE,
+    FacebookRedirectURLIE,
 )
 from .fancode import (
     FancodeVodIE,
@@ -499,6 +512,14 @@
 )
 from .gaia import GaiaIE
 from .gameinformer import GameInformerIE
+from .gamejolt import (
+    GameJoltIE,
+    GameJoltUserIE,
+    GameJoltGameIE,
+    GameJoltGameSoundtrackIE,
+    GameJoltCommunityIE,
+    GameJoltSearchIE,
+)
 from .gamespot import GameSpotIE
 from .gamestar import GameStarIE
 from .gaskrank import GaskrankIE
@@ -517,6 +538,7 @@
 )
 from .go import GoIE
 from .godtube import GodTubeIE
+from .gofile import GofileIE
 from .golem import GolemIE
 from .googledrive import GoogleDriveIE
 from .googlepodcasts import (
@@ -556,6 +578,10 @@
     HRTiIE,
     HRTiPlaylistIE,
 )
+from .hse import (
+    HSEShowIE,
+    HSEProductIE,
+)
 from .huajiao import HuajiaoIE
 from .huffpost import HuffPostIE
 from .hungama import (
@@ -701,6 +727,7 @@
     LineLiveChannelIE,
 )
 from .linkedin import (
+    LinkedInIE,
     LinkedInLearningIE,
     LinkedInLearningCourseIE,
 )
@@ -792,6 +819,7 @@
 )
 from .mit import TechTVMITIE, OCWMITIE
 from .mitele import MiTeleIE
+from .mixch import MixchIE
 from .mixcloud import (
     MixcloudIE,
     MixcloudUserIE,
@@ -848,6 +876,10 @@
     N1InfoAssetIE,
     N1InfoIIE,
 )
+from .nate import (
+    NateIE,
+    NateProgramIE,
+)
 from .nationalgeographic import (
     NationalGeographicVideoIE,
     NationalGeographicTVIE,
@@ -881,7 +913,10 @@
     NJoyEmbedIE,
 )
 from .ndtv import NDTVIE
-from .nebula import NebulaIE
+from .nebula import (
+    NebulaIE,
+    NebulaCollectionIE,
+)
 from .nerdcubed import NerdCubedFeedIE
 from .netzkino import NetzkinoIE
 from .neteasemusic import (
@@ -935,7 +970,10 @@
     NicovideoSearchIE,
     NicovideoSearchURLIE,
 )
-from .ninecninemedia import NineCNineMediaIE
+from .ninecninemedia import (
+    NineCNineMediaIE,
+    CPTwentyFourIE,
+)
 from .ninegag import NineGagIE
 from .ninenow import NineNowIE
 from .nintendo import NintendoIE
@@ -999,6 +1037,7 @@
 from .olympics import OlympicsReplayIE
 from .on24 import On24IE
 from .ondemandkorea import OnDemandKoreaIE
+from .onefootball import OneFootballIE
 from .onet import (
     OnetIE,
     OnetChannelIE,
@@ -1010,6 +1049,10 @@
     OoyalaIE,
     OoyalaExternalIE,
 )
+from .opencast import (
+    OpencastIE,
+    OpencastPlaylistIE,
+)
 from .openrec import (
     OpenRecIE,
     OpenRecCaptureIE,
@@ -1059,6 +1102,7 @@
     PeerTubeIE,
     PeerTubePlaylistIE,
 )
+from .peertv import PeerTVIE
 from .peloton import (
     PelotonIE,
     PelotonLiveIE
@@ -1159,6 +1203,7 @@
 from .radiojavan import RadioJavanIE
 from .radiobremen import RadioBremenIE
 from .radiofrance import RadioFranceIE
+from .radiozet import RadioZetPodcastIE
 from .radiokapital import (
     RadioKapitalIE,
     RadioKapitalShowIE,
@@ -1198,9 +1243,11 @@
     RedBullTVRrnContentIE,
     RedBullIE,
 )
-from .reddit import (
-    RedditIE,
-    RedditRIE,
-)
+from .reddit import RedditIE
+from .redgifs import (
+    RedGifsIE,
+    RedGifsSearchIE,
+    RedGifsUserIE,
+)
 from .redtube import RedTubeIE
 from .regiotv import RegioTVIE
@@ -1228,8 +1275,15 @@
     RTL2YouSeriesIE,
 )
 from .rtp import RTPIE
+from .rtrfm import RTRFMIE
 from .rts import RTSIE
-from .rtve import RTVEALaCartaIE, RTVELiveIE, RTVEInfantilIE, RTVELiveIE, RTVETelevisionIE
+from .rtve import (
+    RTVEALaCartaIE,
+    RTVEAudioIE,
+    RTVELiveIE,
+    RTVEInfantilIE,
+    RTVETelevisionIE,
+)
 from .rtvnh import RTVNHIE
 from .rtvs import RTVSIE
 from .ruhd import RUHDIE
@@ -1244,6 +1298,7 @@
     RutubeMovieIE,
     RutubePersonIE,
     RutubePlaylistIE,
+    RutubeTagsIE,
 )
 from .rutv import RUTVIE
 from .ruutu import RuutuIE
@@ -1269,7 +1324,7 @@
     SCTECourseIE,
 )
 from .seeker import SeekerIE
-from .senateisvp import SenateISVPIE
+from .senategov import SenateISVPIE, SenateGovIE
 from .sendtonews import SendtoNewsIE
 from .servus import ServusIE
 from .sevenplus import SevenPlusIE
@@ -1295,6 +1350,7 @@
 )
 from .sina import SinaIE
 from .sixplay import SixPlayIE
+from .skeb import SkebIE
 from .skyit import (
     SkyItPlayerIE,
     SkyItVideoIE,
@@ -1330,6 +1386,7 @@
     SoundcloudEmbedIE,
     SoundcloudIE,
     SoundcloudSetIE,
+    SoundcloudRelatedIE,
     SoundcloudUserIE,
     SoundcloudTrackStationIE,
     SoundcloudPlaylistIE,
@@ -1396,8 +1453,10 @@
 from .streamanity import StreamanityIE
 from .streamcloud import StreamcloudIE
 from .streamcz import StreamCZIE
+from .streamff import StreamFFIE
 from .streetvoice import StreetVoiceIE
 from .stretchinternet import StretchInternetIE
+from .stripchat import StripchatIE
 from .stv import STVPlayerIE
 from .sunporno import SunPornoIE
 from .sverigesradio import (
@@ -1489,6 +1548,9 @@
     ToggleIE,
     MeWatchIE,
 )
+from .toggo import (
+    ToggoIE,
+)
 from .tokentube import (
     TokentubeIE,
     TokentubeChannelIE
@@ -1505,6 +1567,7 @@
     TrovoChannelVodIE,
     TrovoChannelClipIE,
 )
+from .trueid import TrueIDIE
 from .trunews import TruNewsIE
 from .trutv import TruTVIE
 from .tube8 import Tube8IE
@@ -1782,6 +1845,7 @@
     WeiboMobileIE
 )
 from .weiqitv import WeiqiTVIE
+from .willow import WillowIE
 from .wimtv import WimTVIE
 from .whowatch import WhoWatchIE
 from .wistia import (

@@ -23,9 +23,11 @@
     merge_dicts,
     network_exceptions,
     parse_count,
+    parse_qs,
     qualities,
     sanitized_Request,
     try_get,
+    url_or_none,
     urlencode_postdata,
     urljoin,
 )
@@ -746,3 +748,42 @@ def _real_extract(self, url):
         return self.url_result(
             compat_urllib_parse_unquote(self._match_id(url)),
             FacebookIE.ie_key())
+
+
+class FacebookRedirectURLIE(InfoExtractor):
+    IE_DESC = False  # Do not list
+    _VALID_URL = r'https?://(?:[\w-]+\.)?facebook\.com/flx/warn[/?]'
+    _TESTS = [{
+        'url': 'https://www.facebook.com/flx/warn/?h=TAQHsoToz&u=https%3A%2F%2Fwww.youtube.com%2Fwatch%3Fv%3DpO8h3EaFRdo&s=1',
+        'info_dict': {
+            'id': 'pO8h3EaFRdo',
+            'ext': 'mp4',
+            'title': 'Tripeo Boiler Room x Dekmantel Festival DJ Set',
+            'description': 'md5:2d713ccbb45b686a1888397b2c77ca6b',
+            'channel_id': 'UCGBpxWJr9FNOcFYA5GkKrMg',
+            'playable_in_embed': True,
+            'categories': ['Music'],
+            'channel': 'Boiler Room',
+            'uploader_id': 'brtvofficial',
+            'uploader': 'Boiler Room',
+            'tags': 'count:11',
+            'duration': 3332,
+            'live_status': 'not_live',
+            'thumbnail': 'https://i.ytimg.com/vi/pO8h3EaFRdo/maxresdefault.jpg',
+            'channel_url': 'https://www.youtube.com/channel/UCGBpxWJr9FNOcFYA5GkKrMg',
+            'availability': 'public',
+            'uploader_url': 'http://www.youtube.com/user/brtvofficial',
+            'upload_date': '20150917',
+            'age_limit': 0,
+            'view_count': int,
+            'like_count': int,
+        },
+        'add_ie': ['Youtube'],
+        'params': {'skip_download': 'Youtube'},
+    }]
+
+    def _real_extract(self, url):
+        redirect_url = url_or_none(parse_qs(url).get('u', [None])[-1])
+        if not redirect_url:
+            raise ExtractorError('Invalid facebook redirect URL', expected=True)
+        return self.url_result(redirect_url)

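# How the redirect extraction above works (illustrative values): the warning
# page carries the real target in its `u` query parameter, so for
#   https://www.facebook.com/flx/warn/?u=https%3A%2F%2Fwww.youtube.com%2Fwatch%3Fv%3DpO8h3EaFRdo
# parse_qs(url) yields {'u': ['https://www.youtube.com/watch?v=pO8h3EaFRdo'], ...},
# url_or_none() rejects anything that is not a plausible URL, and the result
# is handed back to the matching extractor via url_result().
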
@@ -41,7 +41,7 @@ class FancodeVodIE(InfoExtractor):
     _ACCESS_TOKEN = None
     _NETRC_MACHINE = 'fancode'

-    _LOGIN_HINT = 'Use "--user refresh --password <refresh_token>" to login using a refresh token'
+    _LOGIN_HINT = 'Use "--username refresh --password <refresh_token>" to login using a refresh token'

     headers = {
         'content-type': 'application/json',

@@ -170,7 +170,7 @@ def _real_extract(self, url):
         return {
             'id': channel_id,
             'display_id': channel_data.get('alias'),
-            'title': self._live_title(title) if is_live else title,
+            'title': title,
             'description': channel_data.get('description'),
             'thumbnails': thumbnails,
             'formats': formats,

@@ -185,7 +185,7 @@ def _extract_video(self, video_id, catalogue=None):
                 'vcodec': 'none',
                 'ext': 'mhtml',
                 'protocol': 'mhtml',
-                'url': 'about:dummy',
+                'url': 'about:invalid',
                 'fragments': [{
                     'path': sheet,
                     # XXX: not entirely accurate; each spritesheet seems to be
@@ -203,7 +203,7 @@ def _extract_video(self, video_id, catalogue=None):

         return {
             'id': video_id,
-            'title': self._live_title(title) if is_live else title,
+            'title': title,
             'thumbnail': image,
             'duration': duration,
             'timestamp': timestamp,

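# Aside on the first hunk above: the mhtml pseudo-format stores storyboard
# sprite sheets as fragment paths rather than a single media URL, so 'url' is
# only a placeholder. 'about:invalid' is the about: URI conventionally
# reserved for "no valid resource", which makes the intent explicit where
# 'about:dummy' was ad hoc.
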
@@ -59,9 +59,6 @@ def _real_extract(self, url):
             stream_url, video_id, 'mp4',
             'm3u8_native', m3u8_id='hls')

-        if is_live:
-            title = self._live_title(title)
-
         return {
             'id': video_id,
             'formats': formats,

@@ -5,19 +5,32 @@


 class FujiTVFODPlus7IE(InfoExtractor):
-    _VALID_URL = r'https?://i\.fod\.fujitv\.co\.jp/plus7/web/[0-9a-z]{4}/(?P<id>[0-9a-z]+)'
+    _VALID_URL = r'https?://fod\.fujitv\.co\.jp/title/[0-9a-z]{4}/(?P<id>[0-9a-z]+)'
     _BASE_URL = 'http://i.fod.fujitv.co.jp/'
     _BITRATE_MAP = {
         300: (320, 180),
         800: (640, 360),
         1200: (1280, 720),
         2000: (1280, 720),
         4000: (1920, 1080),
     }

+    _TESTS = [{
+        'url': 'https://fod.fujitv.co.jp/title/5d40/5d40810075',
+        'info_dict': {
+            'id': '5d40810075',
+            'title': '5d40810075',
+            'ext': 'mp4',
+            'format_id': '4000',
+            'thumbnail': 'http://i.fod.fujitv.co.jp/pc/image/wbtn/wbtn_5d40810075.jpg'
+        },
+        'skip': 'Expires after a week'
+    }]
+
     def _real_extract(self, url):
         video_id = self._match_id(url)
         formats = self._extract_m3u8_formats(
-            self._BASE_URL + 'abr/pc_html5/%s.m3u8' % video_id, video_id, 'mp4')
+            self._BASE_URL + 'abr/tv_android/%s.m3u8' % video_id, video_id, 'mp4')
         for f in formats:
             wh = self._BITRATE_MAP.get(f.get('tbr'))
             if wh:

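# Sketch of how _BITRATE_MAP is applied in the loop above (`tbr` is the total
# bitrate yt-dlp parses from the m3u8 master; the continuation after `if wh:`
# is cut off in this diff, so the update shown here is an assumption):
#   f = {'tbr': 4000}
#   wh = _BITRATE_MAP.get(f.get('tbr'))  # -> (1920, 1080)
#   if wh:
#       f['width'], f['height'] = wh
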
@@ -276,7 +276,7 @@ def _real_extract(self, url):

     def _get_subtitles(self, subtitles, experience_id, episode, display_id, format_name):
         if isinstance(episode, str):
             webpage = self._download_webpage(
-                f'https://www.funimation.com/player/{experience_id}', display_id,
+                f'https://www.funimation.com/player/{experience_id}/', display_id,
                 fatal=False, note=f'Downloading player webpage for {format_name}')
             episode, _, _ = self._get_episode(webpage, episode_id=episode, fatal=False)

@@ -15,7 +15,7 @@


 class GabTVIE(InfoExtractor):
-    _VALID_URL = r'(?:https?://)tv.gab.com/channel/[^/]+/view/(?P<id>[a-z0-9-]+)'
+    _VALID_URL = r'https?://tv\.gab\.com/channel/[^/]+/view/(?P<id>[a-z0-9-]+)'
     _TESTS = [{
         'url': 'https://tv.gab.com/channel/wurzelroot/view/why-was-america-in-afghanistan-61217eacea5665de450d0488',
         'info_dict': {

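# Aside: besides dropping the pointless scheme group, the new pattern escapes
# the dots in the hostname. Unescaped, `.` matches any character:
#   import re
#   re.match(r'tv.gab.com', 'tvXgabYcom')    # matches (old pattern, too loose)
#   re.match(r'tv\.gab\.com', 'tvXgabYcom')  # None (new pattern, exact host)
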
yt_dlp/extractor/gamejolt.py (new file, 540 lines)
@@ -0,0 +1,540 @@
# coding: utf-8
import itertools
import json
import math

from .common import InfoExtractor
from ..compat import compat_urllib_parse_unquote
from ..utils import (
    determine_ext,
    int_or_none,
    str_or_none,
    traverse_obj,
    try_get
)


class GameJoltBaseIE(InfoExtractor):
    _API_BASE = 'https://gamejolt.com/site-api/'

    def _call_api(self, endpoint, *args, **kwargs):
        kwargs.setdefault('headers', {}).update({'Accept': 'image/webp,*/*'})
        return self._download_json(self._API_BASE + endpoint, *args, **kwargs)['payload']

    def _parse_content_as_text(self, content):
        outer_contents, joined_contents = content.get('content') or [], []
        for outer_content in outer_contents:
            if outer_content.get('type') != 'paragraph':
                joined_contents.append(self._parse_content_as_text(outer_content))
                continue
            inner_contents, inner_content_text = outer_content.get('content') or [], ''
            for inner_content in inner_contents:
                if inner_content.get('text'):
                    inner_content_text += inner_content['text']
                elif inner_content.get('type') == 'hardBreak':
                    inner_content_text += '\n'
            joined_contents.append(inner_content_text)

        return '\n'.join(joined_contents)

    def _get_comments(self, post_num_id, post_hash_id):
        sort_by, scroll_id = self._configuration_arg('comment_sort', ['hot'], ie_key=GameJoltIE.ie_key())[0], -1
        is_scrolled = sort_by in ('new', 'you')
        for page in itertools.count(1):
            comments_data = self._call_api(
                'comments/Fireside_Post/%s/%s?%s=%d' % (
                    post_num_id, sort_by,
                    'scroll_id' if is_scrolled else 'page', scroll_id if is_scrolled else page),
                post_hash_id, note='Downloading comments list page %d' % page)
            if not comments_data.get('comments'):
                break
            for comment in traverse_obj(comments_data, (('comments', 'childComments'), ...), expected_type=dict, default=[]):
                yield {
                    'id': comment['id'],
                    'text': self._parse_content_as_text(
                        self._parse_json(comment['comment_content'], post_hash_id)),
                    'timestamp': int_or_none(comment.get('posted_on'), scale=1000),
                    'like_count': comment.get('votes'),
                    'author': traverse_obj(comment, ('user', ('display_name', 'name')), expected_type=str_or_none, get_all=False),
                    'author_id': traverse_obj(comment, ('user', 'username'), expected_type=str_or_none),
                    'author_thumbnail': traverse_obj(comment, ('user', 'image_avatar'), expected_type=str_or_none),
                    'parent': comment.get('parent_id') or None,
                }
            scroll_id = int_or_none(comments_data['comments'][-1].get('posted_on'))

    def _parse_post(self, post_data):
        post_id = post_data['hash']
        lead_content = self._parse_json(post_data.get('lead_content') or '{}', post_id, fatal=False) or {}
        description, full_description = post_data.get('leadStr') or self._parse_content_as_text(
            self._parse_json(post_data.get('lead_content'), post_id)), None
        if post_data.get('has_article'):
            article_content = self._parse_json(
                post_data.get('article_content')
                or self._call_api(f'web/posts/article/{post_data.get("id", post_id)}', post_id,
                                  note='Downloading article metadata', errnote='Unable to download article metadata', fatal=False).get('article'),
                post_id, fatal=False)
            full_description = self._parse_content_as_text(article_content)

        user_data = post_data.get('user') or {}
        info_dict = {
            'extractor_key': GameJoltIE.ie_key(),
            'extractor': 'GameJolt',
            'webpage_url': str_or_none(post_data.get('url')) or f'https://gamejolt.com/p/{post_id}',
            'id': post_id,
            'title': description,
            'description': full_description or description,
            'display_id': post_data.get('slug'),
            'uploader': user_data.get('display_name') or user_data.get('name'),
            'uploader_id': user_data.get('username'),
            'uploader_url': 'https://gamejolt.com' + user_data['url'] if user_data.get('url') else None,
            'categories': [try_get(category, lambda x: '%s - %s' % (x['community']['name'], x['channel'].get('display_title') or x['channel']['title']))
                           for category in post_data.get('communities' or [])],
            'tags': traverse_obj(
                lead_content, ('content', ..., 'content', ..., 'marks', ..., 'attrs', 'tag'), expected_type=str_or_none),
            'like_count': int_or_none(post_data.get('like_count')),
            'comment_count': int_or_none(post_data.get('comment_count'), default=0),
            'timestamp': int_or_none(post_data.get('added_on'), scale=1000),
            'release_timestamp': int_or_none(post_data.get('published_on'), scale=1000),
            '__post_extractor': self.extract_comments(post_data.get('id'), post_id)
        }

        # TODO: Handle multiple videos/embeds?
        video_data = traverse_obj(post_data, ('videos', ...), expected_type=dict, get_all=False) or {}
        formats, subtitles, thumbnails = [], {}, []
        for media in video_data.get('media') or []:
            media_url, mimetype, ext, media_id = media['img_url'], media.get('filetype', ''), determine_ext(media['img_url']), media.get('type')
            if mimetype == 'application/vnd.apple.mpegurl' or ext == 'm3u8':
                hls_formats, hls_subs = self._extract_m3u8_formats_and_subtitles(media_url, post_id, 'mp4', m3u8_id=media_id)
                formats.extend(hls_formats)
                subtitles.update(hls_subs)
            elif mimetype == 'application/dash+xml' or ext == 'mpd':
                dash_formats, dash_subs = self._extract_mpd_formats_and_subtitles(media_url, post_id, mpd_id=media_id)
                formats.extend(dash_formats)
                subtitles.update(dash_subs)
            elif 'image' in mimetype:
                thumbnails.append({
                    'id': media_id,
                    'url': media_url,
                    'width': media.get('width'),
                    'height': media.get('height'),
                    'filesize': media.get('filesize'),
                })
            else:
                formats.append({
                    'format_id': media_id,
                    'url': media_url,
                    'width': media.get('width'),
                    'height': media.get('height'),
                    'filesize': media.get('filesize'),
                    'acodec': 'none' if 'video-card' in media_url else None,
                })

        if formats:
            return {
                **info_dict,
                'formats': formats,
                'subtitles': subtitles,
                'thumbnails': thumbnails,
                'view_count': int_or_none(video_data.get('view_count')),
            }

        gif_entries = []
        for media in post_data.get('media', []):
            if determine_ext(media['img_url']) != 'gif' or 'gif' not in media.get('filetype', ''):
                continue
            gif_entries.append({
                'id': media['hash'],
                'title': media['filename'].split('.')[0],
                'formats': [{
                    'format_id': url_key,
                    'url': media[url_key],
                    'width': media.get('width') if url_key == 'img_url' else None,
                    'height': media.get('height') if url_key == 'img_url' else None,
                    'filesize': media.get('filesize') if url_key == 'img_url' else None,
                    'acodec': 'none',
                } for url_key in ('img_url', 'mediaserver_url', 'mediaserver_url_mp4', 'mediaserver_url_webm') if media.get(url_key)]
            })
        if gif_entries:
            return {
                '_type': 'playlist',
                **info_dict,
                'entries': gif_entries,
            }

        embed_url = traverse_obj(post_data, ('embeds', ..., 'url'), expected_type=str_or_none, get_all=False)
        if embed_url:
            return self.url_result(embed_url)
        return info_dict


class GameJoltIE(GameJoltBaseIE):
    _VALID_URL = r'https?://(?:www\.)?gamejolt\.com/p/(?:[\w-]*-)?(?P<id>\w{8})'
    _TESTS = [{
        # No audio
        'url': 'https://gamejolt.com/p/introducing-ramses-jackson-some-fnf-himbo-i-ve-been-animating-fo-c6achnzu',
        'md5': 'cd5f733258f6678b0ce500dd88166d86',
        'info_dict': {
            'id': 'c6achnzu',
            'ext': 'mp4',
            'display_id': 'introducing-ramses-jackson-some-fnf-himbo-i-ve-been-animating-fo-c6achnzu',
            'title': 'Introducing Ramses Jackson, some FNF himbo I’ve been animating for the past few days, hehe.\n#fnfmod #fridaynightfunkin',
            'description': 'Introducing Ramses Jackson, some FNF himbo I’ve been animating for the past few days, hehe.\n#fnfmod #fridaynightfunkin',
            'uploader': 'Jakeneutron',
            'uploader_id': 'Jakeneutron',
            'uploader_url': 'https://gamejolt.com/@Jakeneutron',
            'categories': ['Friday Night Funkin\' - Videos'],
            'tags': ['fnfmod', 'fridaynightfunkin'],
            'timestamp': 1633499590,
            'upload_date': '20211006',
            'release_timestamp': 1633499655,
            'release_date': '20211006',
            'thumbnail': 're:^https?://.+wgch9mhq.png$',
            'like_count': int,
            'comment_count': int,
            'view_count': int,
        }
    }, {
        # YouTube embed
        'url': 'https://gamejolt.com/p/hey-hey-if-there-s-anyone-who-s-looking-to-get-into-learning-a-n6g4jzpq',
        'md5': '79a931ff500a5c783ef6c3bda3272e32',
        'info_dict': {
            'id': 'XsNA_mzC0q4',
            'title': 'Adobe Animate CC 2021 Tutorial || Part 1 - The Basics',
            'description': 'md5:9d1ab9e2625b3fe1f42b2a44c67fdd13',
            'uploader': 'Jakeneutron',
            'uploader_id': 'Jakeneutron',
            'uploader_url': 'http://www.youtube.com/user/Jakeneutron',
            'ext': 'mp4',
            'duration': 1749,
            'tags': ['Adobe Animate CC', 'Tutorial', 'Animation', 'The Basics', 'For Beginners'],
            'like_count': int,
            'playable_in_embed': True,
            'categories': ['Education'],
            'availability': 'public',
            'thumbnail': 'https://i.ytimg.com/vi_webp/XsNA_mzC0q4/maxresdefault.webp',
            'age_limit': 0,
            'live_status': 'not_live',
            'channel_url': 'https://www.youtube.com/channel/UC6_L7fnczNalFZyBthUE9oA',
            'channel': 'Jakeneutron',
            'channel_id': 'UC6_L7fnczNalFZyBthUE9oA',
            'upload_date': '20211015',
            'view_count': int,
            'chapters': 'count:18',
        }
    }, {
        # Article
        'url': 'https://gamejolt.com/p/i-fuckin-broke-chaos-d56h3eue',
        'md5': '786c1ccf98fde02c03a2768acb4258d0',
        'info_dict': {
            'id': 'd56h3eue',
            'ext': 'mp4',
            'display_id': 'i-fuckin-broke-chaos-d56h3eue',
            'title': 'I fuckin broke Chaos.',
            'description': 'I moved my tab durning the cutscene so now it\'s stuck like this.',
            'uploader': 'Jeff____________',
            'uploader_id': 'The_Nyesh_Man',
            'uploader_url': 'https://gamejolt.com/@The_Nyesh_Man',
            'categories': ['Friday Night Funkin\' - Videos'],
            'timestamp': 1639800264,
            'upload_date': '20211218',
            'release_timestamp': 1639800330,
            'release_date': '20211218',
            'thumbnail': 're:^https?://.+euksy8bd.png$',
            'like_count': int,
            'comment_count': int,
            'view_count': int,
        }
    }, {
        # Single GIF
        'url': 'https://gamejolt.com/p/hello-everyone-i-m-developing-a-pixel-art-style-mod-for-fnf-and-i-vs4gdrd8',
        'info_dict': {
            'id': 'vs4gdrd8',
            'display_id': 'hello-everyone-i-m-developing-a-pixel-art-style-mod-for-fnf-and-i-vs4gdrd8',
            'title': 'md5:cc3d8b031d9bc7ec2ec5a9ffc707e1f9',
            'description': 'md5:cc3d8b031d9bc7ec2ec5a9ffc707e1f9',
            'uploader': 'Quesoguy',
            'uploader_id': 'CheeseguyDev',
            'uploader_url': 'https://gamejolt.com/@CheeseguyDev',
            'categories': ['Game Dev - General', 'Arts n\' Crafts - Creations', 'Pixel Art - showcase',
                           'Friday Night Funkin\' - Mods', 'Newgrounds - Friday Night Funkin (13+)'],
            'timestamp': 1639517122,
            'release_timestamp': 1639519966,
            'like_count': int,
            'comment_count': int,
        },
        'playlist': [{
            'info_dict': {
                'id': 'dszyjnwi',
                'ext': 'webm',
                'title': 'gif-presentacion-mejorado-dszyjnwi',
                'n_entries': 1,
            }
        }]
    }, {
        # Multiple GIFs
        'url': 'https://gamejolt.com/p/gif-yhsqkumq',
        'playlist_count': 35,
        'info_dict': {
            'id': 'yhsqkumq',
            'display_id': 'gif-yhsqkumq',
            'title': 'GIF',
            'description': 'GIF',
            'uploader': 'DaniilTvman',
            'uploader_id': 'DaniilTvman',
            'uploader_url': 'https://gamejolt.com/@DaniilTvman',
            'categories': ['Five Nights At The AGK Studio Comunity - NEWS game'],
            'timestamp': 1638721559,
            'release_timestamp': 1638722276,
            'like_count': int,
            'comment_count': int,
        },
    }]

    def _real_extract(self, url):
        post_id = self._match_id(url)
        post_data = self._call_api(
            f'web/posts/view/{post_id}', post_id)['post']
        return self._parse_post(post_data)


class GameJoltPostListBaseIE(GameJoltBaseIE):
    def _entries(self, endpoint, list_id, note='Downloading post list', errnote='Unable to download post list', initial_items=[]):
        page_num, scroll_id = 1, None
        items = initial_items or self._call_api(endpoint, list_id, note=note, errnote=errnote)['items']
        while items:
            for item in items:
                yield self._parse_post(item['action_resource_model'])
            scroll_id = items[-1]['scroll_id']
            page_num += 1
            items = self._call_api(
                endpoint, list_id, note=f'{note} page {page_num}', errnote=errnote, data=json.dumps({
                    'scrollDirection': 'from',
                    'scrollId': scroll_id,
                }).encode('utf-8')).get('items')


class GameJoltUserIE(GameJoltPostListBaseIE):
    _VALID_URL = r'https?://(?:www\.)?gamejolt\.com/@(?P<id>[\w-]+)'
    _TESTS = [{
        'url': 'https://gamejolt.com/@BlazikenSuperStar',
        'playlist_mincount': 1,
        'info_dict': {
            'id': '6116784',
            'title': 'S. Blaze',
            'description': 'md5:5ba7fbbb549e8ea2545aafbfe22eb03a',
        },
        'params': {
            'ignore_no_formats_error': True,
        },
        'expected_warnings': ['skipping format', 'No video formats found', 'Requested format is not available'],
    }]

    def _real_extract(self, url):
        user_id = self._match_id(url)
        user_data = self._call_api(
            f'web/profile/@{user_id}', user_id, note='Downloading user info', errnote='Unable to download user info')['user']
        bio = self._parse_content_as_text(
            self._parse_json(user_data.get('bio_content', '{}'), user_id, fatal=False) or {})
        return self.playlist_result(
            self._entries(f'web/posts/fetch/user/@{user_id}?tab=active', user_id, 'Downloading user posts', 'Unable to download user posts'),
            str_or_none(user_data.get('id')), user_data.get('display_name') or user_data.get('name'), bio)


class GameJoltGameIE(GameJoltPostListBaseIE):
    _VALID_URL = r'https?://(?:www\.)?gamejolt\.com/games/[\w-]+/(?P<id>\d+)'
    _TESTS = [{
        'url': 'https://gamejolt.com/games/Friday4Fun/655124',
        'playlist_mincount': 2,
        'info_dict': {
            'id': '655124',
            'title': 'Friday Night Funkin\': Friday 4 Fun',
            'description': 'md5:576a7dd87912a2dcf33c50d2bd3966d3'
        },
        'params': {
            'ignore_no_formats_error': True,
        },
        'expected_warnings': ['skipping format', 'No video formats found', 'Requested format is not available'],
    }]

    def _real_extract(self, url):
        game_id = self._match_id(url)
        game_data = self._call_api(
            f'web/discover/games/{game_id}', game_id, note='Downloading game info', errnote='Unable to download game info')['game']
        description = self._parse_content_as_text(
            self._parse_json(game_data.get('description_content', '{}'), game_id, fatal=False) or {})
        return self.playlist_result(
            self._entries(f'web/posts/fetch/game/{game_id}', game_id, 'Downloading game posts', 'Unable to download game posts'),
            game_id, game_data.get('title'), description)


class GameJoltGameSoundtrackIE(GameJoltBaseIE):
    _VALID_URL = r'https?://(?:www\.)?gamejolt\.com/get/soundtrack(?:\?|\#!?)(?:.*?[&;])??game=(?P<id>(?:\d+)+)'
    _TESTS = [{
        'url': 'https://gamejolt.com/get/soundtrack?foo=bar&game=657899',
        'info_dict': {
            'id': '657899',
            'title': 'Friday Night Funkin\': Vs Oswald',
            'n_entries': None,
        },
        'playlist': [{
            'info_dict': {
                'id': '184434',
                'ext': 'mp3',
                'title': 'Gettin\' Lucky (Menu Music)',
                'url': r're:^https://.+vs-oswald-menu-music\.mp3$',
                'release_timestamp': 1635190816,
                'release_date': '20211025',
                'n_entries': 3,
            }
        }, {
            'info_dict': {
                'id': '184435',
                'ext': 'mp3',
                'title': 'Rabbit\'s Luck (Extended Version)',
                'url': r're:^https://.+rabbit-s-luck--full-version-\.mp3$',
                'release_timestamp': 1635190841,
                'release_date': '20211025',
                'n_entries': 3,
            }
        }, {
            'info_dict': {
                'id': '185228',
                'ext': 'mp3',
                'title': 'Last Straw',
                'url': r're:^https://.+last-straw\.mp3$',
                'release_timestamp': 1635881104,
                'release_date': '20211102',
                'n_entries': 3,
            }
        }]
    }]

    def _real_extract(self, url):
        game_id = self._match_id(url)
        game_overview = self._call_api(
            f'web/discover/games/overview/{game_id}', game_id, note='Downloading soundtrack info', errnote='Unable to download soundtrack info')
        return self.playlist_result([{
            'id': str_or_none(song.get('id')),
            'title': str_or_none(song.get('title')),
            'url': str_or_none(song.get('url')),
            'release_timestamp': int_or_none(song.get('posted_on'), scale=1000),
        } for song in game_overview.get('songs') or []], game_id, traverse_obj(
            game_overview, ('microdata', 'name'), (('twitter', 'fb'), 'title'), expected_type=str_or_none, get_all=False))


class GameJoltCommunityIE(GameJoltPostListBaseIE):
    _VALID_URL = r'https?://(?:www\.)?gamejolt\.com/c/(?P<id>(?P<community>[\w-]+)(?:/(?P<channel>[\w-]+))?)(?:(?:\?|\#!?)(?:.*?[&;])??sort=(?P<sort>\w+))?'
    _TESTS = [{
        'url': 'https://gamejolt.com/c/fnf/videos',
        'playlist_mincount': 50,
        'info_dict': {
            'id': 'fnf/videos',
            'title': 'Friday Night Funkin\' - Videos',
            'description': 'md5:6d8c06f27460f7d35c1554757ffe53c8'
        },
        'params': {
            'playlistend': 50,
            'ignore_no_formats_error': True,
        },
        'expected_warnings': ['skipping format', 'No video formats found', 'Requested format is not available'],
    }, {
        'url': 'https://gamejolt.com/c/youtubers',
        'playlist_mincount': 50,
        'info_dict': {
            'id': 'youtubers/featured',
            'title': 'Youtubers - featured',
            'description': 'md5:53e5582c93dcc467ab597bfca4db17d4'
        },
        'params': {
            'playlistend': 50,
            'ignore_no_formats_error': True,
        },
        'expected_warnings': ['skipping format', 'No video formats found', 'Requested format is not available'],
    }]

    def _real_extract(self, url):
        display_id, community_id, channel_id, sort_by = self._match_valid_url(url).group('id', 'community', 'channel', 'sort')
        channel_id, sort_by = channel_id or 'featured', sort_by or 'new'

        community_data = self._call_api(
            f'web/communities/view/{community_id}', display_id,
            note='Downloading community info', errnote='Unable to download community info')['community']
        channel_data = traverse_obj(self._call_api(
            f'web/communities/view-channel/{community_id}/{channel_id}', display_id,
            note='Downloading channel info', errnote='Unable to download channel info', fatal=False), 'channel') or {}

        title = f'{community_data.get("name") or community_id} - {channel_data.get("display_title") or channel_id}'
        description = self._parse_content_as_text(
            self._parse_json(community_data.get('description_content') or '{}', display_id, fatal=False) or {})
        return self.playlist_result(
            self._entries(
                f'web/posts/fetch/community/{community_id}?channels[]={sort_by}&channels[]={channel_id}',
                display_id, 'Downloading community posts', 'Unable to download community posts'),
            f'{community_id}/{channel_id}', title, description)


class GameJoltSearchIE(GameJoltPostListBaseIE):
    _VALID_URL = r'https?://(?:www\.)?gamejolt\.com/search(?:/(?P<filter>communities|users|games))?(?:\?|\#!?)(?:.*?[&;])??q=(?P<id>(?:[^&#]+)+)'
    _URL_FORMATS = {
        'users': 'https://gamejolt.com/@{username}',
        'communities': 'https://gamejolt.com/c/{path}',
        'games': 'https://gamejolt.com/games/{slug}/{id}',
    }
    _TESTS = [{
        'url': 'https://gamejolt.com/search?foo=bar&q=%23fnf',
        'playlist_mincount': 50,
        'info_dict': {
            'id': '#fnf',
            'title': '#fnf',
        },
        'params': {
            'playlistend': 50,
            'ignore_no_formats_error': True,
        },
        'expected_warnings': ['skipping format', 'No video formats found', 'Requested format is not available'],
    }, {
        'url': 'https://gamejolt.com/search/communities?q=cookie%20run',
        'playlist_mincount': 10,
        'info_dict': {
            'id': 'cookie run',
            'title': 'cookie run',
        },
    }, {
        'url': 'https://gamejolt.com/search/users?q=mlp',
        'playlist_mincount': 278,
        'info_dict': {
            'id': 'mlp',
            'title': 'mlp',
        },
    }, {
        'url': 'https://gamejolt.com/search/games?q=roblox',
        'playlist_mincount': 688,
        'info_dict': {
            'id': 'roblox',
            'title': 'roblox',
        },
    }]

    def _search_entries(self, query, filter_mode, display_query):
        initial_search_data = self._call_api(
            f'web/search/{filter_mode}?q={query}', display_query,
            note=f'Downloading {filter_mode} list', errnote=f'Unable to download {filter_mode} list')
        entries_num = traverse_obj(initial_search_data, 'count', f'{filter_mode}Count')
        if not entries_num:
            return
        for page in range(1, math.ceil(entries_num / initial_search_data['perPage']) + 1):
            search_results = self._call_api(
                f'web/search/{filter_mode}?q={query}&page={page}', display_query,
                note=f'Downloading {filter_mode} list page {page}', errnote=f'Unable to download {filter_mode} list')
            for result in search_results[filter_mode]:
                yield self.url_result(self._URL_FORMATS[filter_mode].format(**result))

    def _real_extract(self, url):
        filter_mode, query = self._match_valid_url(url).group('filter', 'id')
        display_query = compat_urllib_parse_unquote(query)
        return self.playlist_result(
            self._search_entries(query, filter_mode, display_query) if filter_mode else self._entries(
                f'web/posts/fetch/search/{query}', display_query, initial_items=self._call_api(
                    f'web/search?q={query}', display_query,
                    note='Downloading initial post list', errnote='Unable to download initial post list')['posts']),
            display_query, display_query)

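# A rough sketch of what _parse_content_as_text does with Game Jolt's
# ProseMirror-style JSON (hypothetical, abridged input):
#   doc = {'content': [
#       {'type': 'paragraph', 'content': [
#           {'text': 'line one'}, {'type': 'hardBreak'}, {'text': 'line two'}]},
#   ]}
#   _parse_content_as_text(doc)  # -> 'line one\nline two'
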
@@ -56,7 +56,7 @@
 from .myvi import MyviIE
 from .condenast import CondeNastIE
 from .udn import UDNEmbedIE
-from .senateisvp import SenateISVPIE
+from .senategov import SenateISVPIE
 from .svt import SVTIE
 from .pornhub import PornHubIE
 from .xhamster import XHamsterEmbedIE
@@ -136,6 +136,7 @@
 from .simplecast import SimplecastIE
 from .wimtv import WimTVIE
 from .tvp import TVPEmbedIE
+from .blogger import BloggerIE


 class GenericIE(InfoExtractor):
@@ -2173,6 +2174,17 @@ class GenericIE(InfoExtractor):
                 'skip_download': True,
             },
         },
+        {
+            # blogger embed
+            'url': 'https://blog.tomeuvizoso.net/2019/01/a-panfrost-milestone.html',
+            'md5': 'f1bc19b6ea1b0fd1d81e84ca9ec467ac',
+            'info_dict': {
+                'id': 'BLOGGER-video-3c740e3a49197e16-796',
+                'ext': 'mp4',
+                'title': 'Blogger',
+                'thumbnail': r're:^https?://.*',
+            },
+        },
         # {
         #     # TODO: find another test
         #     # http://schema.org/VideoObject
@@ -2332,6 +2344,34 @@ class GenericIE(InfoExtractor):
                 'thumbnail': 'https://bogmedia.org/contents/videos_screenshots/21000/21217/preview_480p.mp4.jpg',
             }
         },
+        {
+            # Reddit-hosted video that will redirect and be processed by RedditIE
+            # Redirects to https://www.reddit.com/r/videos/comments/6rrwyj/that_small_heart_attack/
+            'url': 'https://v.redd.it/zv89llsvexdz',
+            'md5': '87f5f02f6c1582654146f830f21f8662',
+            'info_dict': {
+                'id': 'zv89llsvexdz',
+                'ext': 'mp4',
+                'timestamp': 1501941939.0,
+                'title': 'That small heart attack.',
+                'upload_date': '20170805',
+                'uploader': 'Antw87'
+            }
+        },
+        {
+            # 1080p Reddit-hosted video that will redirect and be processed by RedditIE
+            'url': 'https://v.redd.it/33hgok7dfbz71/',
+            'md5': '7a1d587940242c9bb3bd6eb320b39258',
+            'info_dict': {
+                'id': '33hgok7dfbz71',
+                'ext': 'mp4',
+                'title': "The game Didn't want me to Knife that Guy I guess",
+                'uploader': 'paraf1ve',
+                'timestamp': 1636788683.0,
+                'upload_date': '20211113'
+            }
+        }
         #
     ]

     def report_following_redirect(self, new_url):
@@ -2561,6 +2601,8 @@ def _real_extract(self, url):
             subtitles = {}
             if format_id.endswith('mpegurl'):
                 formats, subtitles = self._extract_m3u8_formats_and_subtitles(url, video_id, 'mp4')
+            elif format_id.endswith('mpd') or format_id.endswith('dash+xml'):
+                formats, subtitles = self._extract_mpd_formats_and_subtitles(url, video_id)
             elif format_id == 'f4m':
                 formats = self._extract_f4m_formats(url, video_id)
             else:

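# Aside on the direct-URL branch above: `format_id` here comes from the
# sniffed Content-Type, so e.g. 'application/vnd.apple.mpegurl' lands in the
# m3u8 branch and 'application/dash+xml' in the newly added MPD branch; both
# now also collect subtitle tracks via the *_formats_and_subtitles helpers.
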
@@ -3216,6 +3258,11 @@ def _real_extract(self, url):
         if onionstudios_url:
             return self.url_result(onionstudios_url)

+        # Look for Blogger embeds
+        blogger_urls = BloggerIE._extract_urls(webpage)
+        if blogger_urls:
+            return self.playlist_from_matches(blogger_urls, video_id, video_title, ie=BloggerIE.ie_key())
+
         # Look for ViewLift embeds
         viewlift_url = ViewLiftEmbedIE._extract_url(webpage)
         if viewlift_url:
@@ -3606,6 +3653,10 @@ def _real_extract(self, url):
         json_ld = self._search_json_ld(webpage, video_id, default={})
         if json_ld.get('url'):
             self.report_detected('JSON LD')
+            if determine_ext(json_ld.get('url')) == 'm3u8':
+                json_ld['formats'], json_ld['subtitles'] = self._extract_m3u8_formats_and_subtitles(
+                    json_ld['url'], video_id, 'mp4')
+                json_ld.pop('url')
             return merge_dicts(json_ld, info_dict)

     def check_video(vurl):

Some files were not shown because too many files have changed in this diff.