BASH_COMPLETION_FILE = "youtube-dl.bash-completion"
BASH_COMPLETION_TEMPLATE = "devscripts/bash-completion.in"
+
def build_completion(opt_parser):
opts_flag = []
for group in opt_parser.option_groups:
for option in group.option_list:
- #for every long flag
+ # for every long flag
opts_flag.append(option.get_opt_string())
with open(BASH_COMPLETION_TEMPLATE) as f:
template = f.read()
with open(BASH_COMPLETION_FILE, "w") as f:
- #just using the special char
+ # just using the special "{{flags}}" placeholder
filled_template = template.replace("{{flags}}", " ".join(opts_flag))
f.write(filled_template)
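+
+# A minimal sketch of what the substitution yields (the template line is
+# hypothetical, not quoted from bash-completion.in):
+#   >>> 'complete -W "{{flags}}" youtube-dl'.replace('{{flags}}', ' '.join(['--help', '--version']))
+#   'complete -W "--help --version" youtube-dl'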
#==============================================================================
+
class BuildError(Exception):
def __init__(self, output, code=500):
self.output = output
class BuildHTTPRequestHandler(BaseHTTPRequestHandler):
- actionDict = { 'build': Builder, 'download': Builder } # They're the same, no more caching.
+ actionDict = {'build': Builder, 'download': Builder} # They're the same, no more caching.
def do_GET(self):
path = urlparse.urlparse(self.path)
'batch-file': ['--require-parameter'],
}
+
def build_completion(opt_parser):
commands = []
versions_info = json.load(open('update/versions.json'))
if 'signature' in versions_info:
- del versions_info['signature']
+ del versions_info['signature']
print('Enter the PKCS1 private key, followed by a blank line:')
privkey = b''
while True:
- try:
- line = input()
- except EOFError:
- break
- if line == '':
- break
- privkey += line.encode('ascii') + b'\n'
+ try:
+ line = input()
+ except EOFError:
+ break
+ if line == '':
+ break
+ privkey += line.encode('ascii') + b'\n'
privkey = rsa.PrivateKey.load_pkcs1(privkey)
signature = hexlify(rsa.pkcs1.sign(json.dumps(versions_info, sort_keys=True).encode('utf-8'), privkey, 'SHA-256')).decode()
print('signature: ' + signature)
versions_info['signature'] = signature
-json.dump(versions_info, open('update/versions.json', 'w'), indent=4, sort_keys=True)
\ No newline at end of file
+json.dump(versions_info, open('update/versions.json', 'w'), indent=4, sort_keys=True)
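+
+# A sketch of the matching check (same `rsa` package; `pubkey` is a
+# hypothetical rsa.PublicKey, and binascii.unhexlify undoes the hexlify above):
+#   rsa.verify(json.dumps(versions_info, sort_keys=True).encode('utf-8'),
+#              binascii.unhexlify(signature), pubkey)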
import datetime
import glob
-import io # For Python 2 compatibilty
+import io  # For Python 2 compatibility
import os
import re
with io.open('update/releases.atom', 'w', encoding='utf-8') as atom_file:
atom_file.write(atom_template)
-
import youtube_dl
+
def main():
with open('supportedsites.html.in', 'r', encoding='utf-8') as tmplf:
template = tmplf.read()
try:
import urllib.request as compat_urllib_request
-except ImportError: # Python 2
+except ImportError: # Python 2
import urllib2 as compat_urllib_request
sys.stderr.write(u'Hi! We changed distribution method and now youtube-dl needs to update itself one more time.\n')
sys.stderr.write(u'The new location of the binaries is https://github.com/rg3/youtube-dl/downloads, not the git repository.\n\n')
try:
- raw_input()
-except NameError: # Python 3
- input()
+ raw_input()
+except NameError: # Python 3
+ input()
filename = sys.argv[0]
"dll_excludes": ['w9xpopen.exe']
}
-setup(console=['youtube-dl.py'], options={ "py2exe": py2exe_options }, zipfile=None)
\ No newline at end of file
+setup(console=['youtube-dl.py'], options={"py2exe": py2exe_options}, zipfile=None)
import urllib2
import json, hashlib
+
def rsa_verify(message, signature, key):
from struct import pack
from hashlib import sha256
from sys import version_info
+
def b(x):
- if version_info[0] == 2: return x
- else: return x.encode('latin1')
+ if version_info[0] == 2:
+ return x
+ else:
+ return x.encode('latin1')
assert(type(message) == type(b('')))
block_size = 0
n = key[0]
raw_bytes.insert(0, pack("B", signature & 0xFF))
signature >>= 8
signature = (block_size - len(raw_bytes)) * b('\x00') + b('').join(raw_bytes)
- if signature[0:2] != b('\x00\x01'): return False
+ if signature[0:2] != b('\x00\x01'):
+ return False
signature = signature[2:]
- if not b('\x00') in signature: return False
+ if not b('\x00') in signature:
+ return False
signature = signature[signature.index(b('\x00'))+1:]
- if not signature.startswith(b('\x30\x31\x30\x0D\x06\x09\x60\x86\x48\x01\x65\x03\x04\x02\x01\x05\x00\x04\x20')): return False
+ if not signature.startswith(b('\x30\x31\x30\x0D\x06\x09\x60\x86\x48\x01\x65\x03\x04\x02\x01\x05\x00\x04\x20')):
+ return False
signature = signature[19:]
- if signature != sha256(message).digest(): return False
+ if signature != sha256(message).digest():
+ return False
return True
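+
+# Usage sketch (placeholder values): `key` is an (n, e) tuple of ints, as
+# `n = key[0]` above implies, and `message` must already be bytes.
+#   if not rsa_verify(versions_bytes, signature, (modulus_int, 65537)):
+#       sys.exit('Signature verification failed')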
sys.stderr.write(u'Hi! We changed distribution method and now youtube-dl needs to update itself one more time.\n')
ping 127.0.0.1 -n 5 -w 1000 > NUL
move /Y "%s.new" "%s"
del "%s"
- \n""" %(exe, exe, bat))
+ \n""" % (exe, exe, bat))
b.close()
os.startfile(bat)
params = get_params(override=override)
super(FakeYDL, self).__init__(params, auto_init=False)
self.result = []
-
+
def to_screen(self, s, skip_eol=None):
print(s)
def expect_warning(self, regex):
# Silence an expected warning matching a regex
old_report_warning = self.report_warning
+
def report_warning(self, message):
- if re.match(regex, message): return
+ if re.match(regex, message):
+ return
old_report_warning(message)
self.report_warning = types.MethodType(report_warning, self)
'ext': 'mp4',
'width': None,
}
+
def fname(templ):
ydl = YoutubeDL({'outtmpl': templ})
return ydl.prepare_filename(info)
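+
+ # With the info dict above ('ext': 'mp4'), a sketch of the expected
+ # expansion: fname('%(ext)s-test') -> 'mp4-test'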
def test_youtube_playlist_matching(self):
assertPlaylist = lambda url: self.assertMatch(url, ['youtube:playlist'])
assertPlaylist('ECUl4u3cNGP61MdtwGTqZA0MreSaDybji8')
- assertPlaylist('UUBABnxM4Ar9ten8Mdjj1j0Q') #585
+ assertPlaylist('UUBABnxM4Ar9ten8Mdjj1j0Q') # 585
assertPlaylist('PL63F0C78739B09958')
assertPlaylist('https://www.youtube.com/playlist?list=UUBABnxM4Ar9ten8Mdjj1j0Q')
assertPlaylist('https://www.youtube.com/course?list=ECUl4u3cNGP61MdtwGTqZA0MreSaDybji8')
assertPlaylist('https://www.youtube.com/playlist?list=PLwP_SiAcdui0KVebT0mU9Apz359a4ubsC')
- assertPlaylist('https://www.youtube.com/watch?v=AV6J6_AeFEQ&playnext=1&list=PL4023E734DA416012') #668
+ assertPlaylist('https://www.youtube.com/watch?v=AV6J6_AeFEQ&playnext=1&list=PL4023E734DA416012') # 668
self.assertFalse('youtube:playlist' in self.matching_ies('PLtS2H6bU1M'))
# Top tracks
assertPlaylist('https://www.youtube.com/playlist?list=MCUS.20142101')
def test_youtube_matching(self):
self.assertTrue(YoutubeIE.suitable('PLtS2H6bU1M'))
- self.assertFalse(YoutubeIE.suitable('https://www.youtube.com/watch?v=AV6J6_AeFEQ&playnext=1&list=PL4023E734DA416012')) #668
+ self.assertFalse(YoutubeIE.suitable('https://www.youtube.com/watch?v=AV6J6_AeFEQ&playnext=1&list=PL4023E734DA416012')) # 668
self.assertMatch('http://youtu.be/BaW_jenozKc', ['youtube'])
self.assertMatch('http://www.youtube.com/v/BaW_jenozKc', ['youtube'])
self.assertMatch('https://youtube.googleapis.com/v/BaW_jenozKc', ['youtube'])
RETRIES = 3
+
class YoutubeDL(youtube_dl.YoutubeDL):
def __init__(self, *args, **kwargs):
self.to_stderr = self.to_screen
self.processed_info_dicts = []
super(YoutubeDL, self).__init__(*args, **kwargs)
+
def report_warning(self, message):
# Don't accept warnings during tests
raise ExtractorError(message)
+
def process_info(self, info_dict):
self.processed_info_dicts.append(info_dict)
return super(YoutubeDL, self).process_info(info_dict)
+
def _file_md5(fn):
with open(fn, 'rb') as f:
return hashlib.md5(f.read()).hexdigest()
class TestDownload(unittest.TestCase):
maxDiff = None
+
def setUp(self):
self.defs = defs
-### Dynamically generate tests
+# Dynamically generate tests
+
+
def generator(test_case):
def test_template(self):
ydl = YoutubeDL(params, auto_init=False)
ydl.add_default_info_extractors()
finished_hook_called = set()
+
def _hook(status):
if status['status'] == 'finished':
finished_hook_called.add(status['filename'])
return tc.get('file') or ydl.prepare_filename(tc.get('info_dict', {}))
res_dict = None
+
def try_rm_tcs_files(tcs=None):
if tcs is None:
tcs = test_cases
return test_template
-### And add them to TestDownload
+# And add them to TestDownload
for n, test_case in enumerate(defs):
test_method = generator(test_case)
tname = 'test_' + str(test_case['name'])
class BaseTestSubtitles(unittest.TestCase):
url = None
IE = None
+
def setUp(self):
self.DL = FakeYDL()
self.ie = self.IE(self.DL)
self.assertEqual(orderedSet([1, 1, 2, 3, 4, 4, 5, 6, 7, 3, 5]), [1, 2, 3, 4, 5, 6, 7])
self.assertEqual(orderedSet([]), [])
self.assertEqual(orderedSet([1]), [1])
- #keep the list ordered
+ # keep the list ordered
self.assertEqual(orderedSet([135, 1, 1, 1]), [135, 1])
def test_unescape_html(self):
self.assertEqual(unescapeHTML('%20;'), '%20;')
self.assertEqual(
unescapeHTML('é'), 'é')
-
+
def test_daterange(self):
- _20century = DateRange("19000101","20000101")
+ _20century = DateRange("19000101", "20000101")
self.assertFalse("17890714" in _20century)
_ac = DateRange("00010101")
self.assertTrue("19690721" in _ac)
})
-
TEST_ID = 'gr51aVj-mLg'
ANNOTATIONS_FILE = TEST_ID + '.flv.annotations.xml'
EXPECTED_ANNOTATIONS = ['Speech bubble', 'Note', 'Title', 'Spotlight', 'Label']
+
class TestAnnotations(unittest.TestCase):
def setUp(self):
# Clear old files
self.tearDown()
-
def test_info_json(self):
- expected = list(EXPECTED_ANNOTATIONS) #Two annotations could have the same text.
+ expected = list(EXPECTED_ANNOTATIONS) # Two annotations could have the same text.
ie = youtube_dl.extractor.YoutubeIE()
ydl = YoutubeDL(params)
ydl.add_info_extractor(ie)
self.assertTrue(os.path.exists(ANNOTATIONS_FILE))
annoxml = None
with io.open(ANNOTATIONS_FILE, 'r', encoding='utf-8') as annof:
- annoxml = xml.etree.ElementTree.parse(annof)
+ annoxml = xml.etree.ElementTree.parse(annof)
self.assertTrue(annoxml is not None, 'Failed to parse annotations XML')
root = annoxml.getroot()
self.assertEqual(root.tag, 'document')
self.assertEqual(annotationsTag.tag, 'annotations')
annotations = annotationsTag.findall('annotation')
- #Not all the annotations have TEXT children and the annotations are returned unsorted.
+ # Not all the annotations have TEXT children and the annotations are returned unsorted.
for a in annotations:
- self.assertEqual(a.tag, 'annotation')
- if a.get('type') == 'text':
- textTag = a.find('TEXT')
- text = textTag.text
- self.assertTrue(text in expected) #assertIn only added in python 2.7
- #remove the first occurance, there could be more than one annotation with the same text
- expected.remove(text)
- #We should have seen (and removed) all the expected annotation texts.
+ self.assertEqual(a.tag, 'annotation')
+ if a.get('type') == 'text':
+ textTag = a.find('TEXT')
+ text = textTag.text
+ self.assertTrue(text in expected) # assertIn only added in python 2.7
+ # remove the first occurrence; there could be more than one annotation with the same text
+ expected.remove(text)
+ # We should have seen (and removed) all the expected annotation texts.
self.assertEqual(len(expected), 0, 'Not all expected annotations were found.')
-
def tearDown(self):
try_rm(ANNOTATIONS_FILE)
result = ie.extract('https://www.youtube.com/watch?v=FXxLjLQi3Fg&list=PLwiyx1dc3P2JR9N8gQaQN_BCvlSlap7re')
self.assertEqual(result['_type'], 'url')
self.assertEqual(YoutubeIE().extract_id(result['url']), 'FXxLjLQi3Fg')
-
+
def test_youtube_course(self):
dl = FakeYDL()
ie = YoutubePlaylistIE(dl)
try:
ie_result = ie.extract(url)
- if ie_result is None: # Finished already (backwards compatibility; listformats and friends should be moved here)
+ if ie_result is None: # Finished already (backwards compatibility; listformats and friends should be moved here)
break
if isinstance(ie_result, list):
# Backwards compatibility: old IE result format
return self.process_ie_result(ie_result, download, extra_info)
else:
return ie_result
- except ExtractorError as de: # An error we somewhat expected
+ except ExtractorError as de: # An error we somewhat expected
self.report_error(compat_str(de), de.format_traceback())
break
except MaxDownloadsReached:
self.report_warning(
'Extractor %s returned a compat_list result. '
'It needs to be updated.' % ie_result.get('extractor'))
+
def _fixup(r):
self.add_extra_info(r,
{
else:
self.to_screen('[info] Writing video subtitles to: ' + sub_filename)
with io.open(encodeFilename(sub_filename), 'w', encoding='utf-8') as subfile:
- subfile.write(sub)
+ subfile.write(sub)
except (OSError, IOError):
self.report_error('Cannot write subtitles file ' + sub_filename)
return
for url in url_list:
try:
- #It also downloads the videos
+ # It also downloads the videos
res = self.extract_info(url)
except UnavailableVideoError:
self.report_error('unable to download video')
if encoding is None:
encoding = preferredencoding()
return encoding
-
compat_print(desc)
sys.exit(0)
-
# Conflicting, missing and erroneous options
if opts.usenetrc and (opts.username is not None or opts.password is not None):
parser.error('using .netrc conflicts with giving username/password')
# In Python 2, sys.argv is a bytestring (also note http://bugs.python.org/issue2128 for Windows systems)
if opts.outtmpl is not None:
opts.outtmpl = opts.outtmpl.decode(preferredencoding())
- outtmpl =((opts.outtmpl is not None and opts.outtmpl)
+ outtmpl = ((opts.outtmpl is not None and opts.outtmpl)
or (opts.format == '-1' and opts.usetitle and '%(title)s-%(id)s-%(format)s.%(ext)s')
or (opts.format == '-1' and '%(id)s-%(format)s.%(ext)s')
or (opts.usetitle and opts.autonumber and '%(autonumber)s-%(title)s-%(id)s.%(ext)s')
ydl.add_post_processor(FFmpegAudioFixPP())
ydl.add_post_processor(AtomicParsleyPP())
-
# Please keep ExecAfterDownload towards the bottom as it allows the user to modify the final file in any way.
# So if the user is able to remove the file before your postprocessor runs it might cause a few problems.
if opts.exec_cmd:
BLOCK_SIZE_BYTES = 16
+
def aes_ctr_decrypt(data, key, counter):
"""
Decrypt with aes in counter mode
-
+
@param {int[]} data cipher
@param {int[]} key 16/24/32-Byte cipher key
@param {instance} counter Instance whose next_value function (@returns {int[]} 16-Byte block)
"""
expanded_key = key_expansion(key)
block_count = int(ceil(float(len(data)) / BLOCK_SIZE_BYTES))
-
- decrypted_data=[]
+
+ decrypted_data = []
for i in range(block_count):
counter_block = counter.next_value()
- block = data[i*BLOCK_SIZE_BYTES : (i+1)*BLOCK_SIZE_BYTES]
+ block = data[i*BLOCK_SIZE_BYTES: (i+1)*BLOCK_SIZE_BYTES]
block += [0]*(BLOCK_SIZE_BYTES - len(block))
-
+
cipher_counter_block = aes_encrypt(counter_block, expanded_key)
decrypted_data += xor(block, cipher_counter_block)
decrypted_data = decrypted_data[:len(data)]
-
+
return decrypted_data
+
def aes_cbc_decrypt(data, key, iv):
"""
Decrypt with aes in CBC mode
-
+
@param {int[]} data cipher
@param {int[]} key 16/24/32-Byte cipher key
@param {int[]} iv 16-Byte IV
"""
expanded_key = key_expansion(key)
block_count = int(ceil(float(len(data)) / BLOCK_SIZE_BYTES))
-
- decrypted_data=[]
+
+ decrypted_data = []
previous_cipher_block = iv
for i in range(block_count):
- block = data[i*BLOCK_SIZE_BYTES : (i+1)*BLOCK_SIZE_BYTES]
+ block = data[i*BLOCK_SIZE_BYTES: (i+1)*BLOCK_SIZE_BYTES]
block += [0]*(BLOCK_SIZE_BYTES - len(block))
-
+
decrypted_block = aes_decrypt(block, expanded_key)
decrypted_data += xor(decrypted_block, previous_cipher_block)
previous_cipher_block = block
decrypted_data = decrypted_data[:len(data)]
-
+
return decrypted_data
+
def key_expansion(data):
"""
Generate key schedule
-
+
@param {int[]} data 16/24/32-Byte cipher key
- @returns {int[]} 176/208/240-Byte expanded key
+ @returns {int[]} 176/208/240-Byte expanded key
"""
- data = data[:] # copy
+ data = data[:] # copy
rcon_iteration = 1
key_size_bytes = len(data)
expanded_key_size_bytes = (key_size_bytes // 4 + 7) * BLOCK_SIZE_BYTES
-
+
while len(data) < expanded_key_size_bytes:
temp = data[-4:]
temp = key_schedule_core(temp, rcon_iteration)
rcon_iteration += 1
- data += xor(temp, data[-key_size_bytes : 4-key_size_bytes])
-
+ data += xor(temp, data[-key_size_bytes: 4-key_size_bytes])
+
for _ in range(3):
temp = data[-4:]
- data += xor(temp, data[-key_size_bytes : 4-key_size_bytes])
-
+ data += xor(temp, data[-key_size_bytes: 4-key_size_bytes])
+
if key_size_bytes == 32:
temp = data[-4:]
temp = sub_bytes(temp)
- data += xor(temp, data[-key_size_bytes : 4-key_size_bytes])
-
+ data += xor(temp, data[-key_size_bytes: 4-key_size_bytes])
+
for _ in range(3 if key_size_bytes == 32 else 2 if key_size_bytes == 24 else 0):
temp = data[-4:]
- data += xor(temp, data[-key_size_bytes : 4-key_size_bytes])
+ data += xor(temp, data[-key_size_bytes: 4-key_size_bytes])
data = data[:expanded_key_size_bytes]
-
+
return data
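+
+# Size check, straight from the formula above
+# ((key_size_bytes // 4 + 7) * BLOCK_SIZE_BYTES):
+#   16-byte key -> (4 + 7) * 16 = 176 bytes (11 round keys)
+#   24-byte key -> (6 + 7) * 16 = 208 bytes (13 round keys)
+#   32-byte key -> (8 + 7) * 16 = 240 bytes (15 round keys)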
+
def aes_encrypt(data, expanded_key):
"""
Encrypt one block with aes
-
+
@param {int[]} data 16-Byte state
- @param {int[]} expanded_key 176/208/240-Byte expanded key
+ @param {int[]} expanded_key 176/208/240-Byte expanded key
@returns {int[]} 16-Byte cipher
"""
rounds = len(expanded_key) // BLOCK_SIZE_BYTES - 1
data = shift_rows(data)
if i != rounds:
data = mix_columns(data)
- data = xor(data, expanded_key[i*BLOCK_SIZE_BYTES : (i+1)*BLOCK_SIZE_BYTES])
+ data = xor(data, expanded_key[i*BLOCK_SIZE_BYTES: (i+1)*BLOCK_SIZE_BYTES])
return data
+
def aes_decrypt(data, expanded_key):
"""
Decrypt one block with aes
-
+
@param {int[]} data 16-Byte cipher
@param {int[]} expanded_key 176/208/240-Byte expanded key
@returns {int[]} 16-Byte state
"""
rounds = len(expanded_key) // BLOCK_SIZE_BYTES - 1
-
+
for i in range(rounds, 0, -1):
- data = xor(data, expanded_key[i*BLOCK_SIZE_BYTES : (i+1)*BLOCK_SIZE_BYTES])
+ data = xor(data, expanded_key[i*BLOCK_SIZE_BYTES: (i+1)*BLOCK_SIZE_BYTES])
if i != rounds:
data = mix_columns_inv(data)
data = shift_rows_inv(data)
data = sub_bytes_inv(data)
data = xor(data, expanded_key[:BLOCK_SIZE_BYTES])
-
+
return data
+
def aes_decrypt_text(data, password, key_size_bytes):
"""
Decrypt text
- The cipher key is retrieved by encrypting the first 16 Byte of 'password'
with the first 'key_size_bytes' Bytes from 'password' (if necessary filled with 0's)
- Mode of operation is 'counter'
-
+
@param {str} data Base64 encoded string
@param {str,unicode} password Password (will be encoded with utf-8)
@param {int} key_size_bytes Possible values: 16 for 128-Bit, 24 for 192-Bit or 32 for 256-Bit
@returns {str} Decrypted data
"""
NONCE_LENGTH_BYTES = 8
-
+
data = bytes_to_intlist(base64.b64decode(data))
password = bytes_to_intlist(password.encode('utf-8'))
-
+
key = password[:key_size_bytes] + [0]*(key_size_bytes - len(password))
key = aes_encrypt(key[:BLOCK_SIZE_BYTES], key_expansion(key)) * (key_size_bytes // BLOCK_SIZE_BYTES)
-
+
nonce = data[:NONCE_LENGTH_BYTES]
cipher = data[NONCE_LENGTH_BYTES:]
-
+
class Counter:
__value = nonce + [0]*(BLOCK_SIZE_BYTES - NONCE_LENGTH_BYTES)
+
def next_value(self):
temp = self.__value
self.__value = inc(self.__value)
return temp
-
+
decrypted_data = aes_ctr_decrypt(cipher, key, Counter())
plaintext = intlist_to_bytes(decrypted_data)
-
+
return plaintext
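+
+# Minimal round-trip self-check (a sketch, not part of the module's API):
+# one block encrypted and then decrypted with the same expanded key should
+# come back unchanged; key/block values are arbitrary placeholders.
+if __name__ == '__main__':
+ _key = list(range(16))  # hypothetical 128-bit key
+ _block = list(range(16))  # one 16-byte state
+ _ekey = key_expansion(_key)
+ assert aes_decrypt(aes_encrypt(_block, _ekey), _ekey) == _block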
RCON = (0x8d, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x1b, 0x36)
0x60, 0x51, 0x7f, 0xa9, 0x19, 0xb5, 0x4a, 0x0d, 0x2d, 0xe5, 0x7a, 0x9f, 0x93, 0xc9, 0x9c, 0xef,
0xa0, 0xe0, 0x3b, 0x4d, 0xae, 0x2a, 0xf5, 0xb0, 0xc8, 0xeb, 0xbb, 0x3c, 0x83, 0x53, 0x99, 0x61,
0x17, 0x2b, 0x04, 0x7e, 0xba, 0x77, 0xd6, 0x26, 0xe1, 0x69, 0x14, 0x63, 0x55, 0x21, 0x0c, 0x7d)
-MIX_COLUMN_MATRIX = ((0x2,0x3,0x1,0x1),
- (0x1,0x2,0x3,0x1),
- (0x1,0x1,0x2,0x3),
- (0x3,0x1,0x1,0x2))
-MIX_COLUMN_MATRIX_INV = ((0xE,0xB,0xD,0x9),
- (0x9,0xE,0xB,0xD),
- (0xD,0x9,0xE,0xB),
- (0xB,0xD,0x9,0xE))
+MIX_COLUMN_MATRIX = ((0x2, 0x3, 0x1, 0x1),
+ (0x1, 0x2, 0x3, 0x1),
+ (0x1, 0x1, 0x2, 0x3),
+ (0x3, 0x1, 0x1, 0x2))
+MIX_COLUMN_MATRIX_INV = ((0xE, 0xB, 0xD, 0x9),
+ (0x9, 0xE, 0xB, 0xD),
+ (0xD, 0x9, 0xE, 0xB),
+ (0xB, 0xD, 0x9, 0xE))
RIJNDAEL_EXP_TABLE = (0x01, 0x03, 0x05, 0x0F, 0x11, 0x33, 0x55, 0xFF, 0x1A, 0x2E, 0x72, 0x96, 0xA1, 0xF8, 0x13, 0x35,
0x5F, 0xE1, 0x38, 0x48, 0xD8, 0x73, 0x95, 0xA4, 0xF7, 0x02, 0x06, 0x0A, 0x1E, 0x22, 0x66, 0xAA,
0xE5, 0x34, 0x5C, 0xE4, 0x37, 0x59, 0xEB, 0x26, 0x6A, 0xBE, 0xD9, 0x70, 0x90, 0xAB, 0xE6, 0x31,
0x44, 0x11, 0x92, 0xd9, 0x23, 0x20, 0x2e, 0x89, 0xb4, 0x7c, 0xb8, 0x26, 0x77, 0x99, 0xe3, 0xa5,
0x67, 0x4a, 0xed, 0xde, 0xc5, 0x31, 0xfe, 0x18, 0x0d, 0x63, 0x8c, 0x80, 0xc0, 0xf7, 0x70, 0x07)
+
def sub_bytes(data):
return [SBOX[x] for x in data]
+
def sub_bytes_inv(data):
return [SBOX_INV[x] for x in data]
+
def rotate(data):
return data[1:] + [data[0]]
+
def key_schedule_core(data, rcon_iteration):
data = rotate(data)
data = sub_bytes(data)
data[0] = data[0] ^ RCON[rcon_iteration]
-
+
return data
+
def xor(data1, data2):
return [x^y for x, y in zip(data1, data2)]
+
def rijndael_mul(a, b):
- if(a==0 or b==0):
+ if a == 0 or b == 0:
return 0
return RIJNDAEL_EXP_TABLE[(RIJNDAEL_LOG_TABLE[a] + RIJNDAEL_LOG_TABLE[b]) % 0xFF]
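+
+# Sanity check against the classic FIPS-197 example: {57} * {83} = {c1} in
+# GF(2^8), i.e. rijndael_mul(0x57, 0x83) == 0xC1.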
+
def mix_column(data, matrix):
data_mixed = []
for row in range(4):
data_mixed.append(mixed)
return data_mixed
+
def mix_columns(data, matrix=MIX_COLUMN_MATRIX):
data_mixed = []
for i in range(4):
- column = data[i*4 : (i+1)*4]
+ column = data[i*4: (i+1)*4]
data_mixed += mix_column(column, matrix)
return data_mixed
+
def mix_columns_inv(data):
return mix_columns(data, MIX_COLUMN_MATRIX_INV)
+
def shift_rows(data):
data_shifted = []
for column in range(4):
for row in range(4):
- data_shifted.append( data[((column + row) & 0b11) * 4 + row] )
+ data_shifted.append(data[((column + row) & 0b11) * 4 + row])
return data_shifted
+
def shift_rows_inv(data):
data_shifted = []
for column in range(4):
for row in range(4):
- data_shifted.append( data[((column - row) & 0b11) * 4 + row] )
+ data_shifted.append(data[((column - row) & 0b11) * 4 + row])
return data_shifted
+
def inc(data):
- data = data[:] # copy
- for i in range(len(data)-1,-1,-1):
+ data = data[:] # copy
+ for i in range(len(data)-1, -1, -1):
if data[i] == 255:
data[i] = 0
else:
try:
import urllib.request as compat_urllib_request
-except ImportError: # Python 2
+except ImportError: # Python 2
import urllib2 as compat_urllib_request
try:
import urllib.error as compat_urllib_error
-except ImportError: # Python 2
+except ImportError: # Python 2
import urllib2 as compat_urllib_error
try:
import urllib.parse as compat_urllib_parse
-except ImportError: # Python 2
+except ImportError: # Python 2
import urllib as compat_urllib_parse
try:
from urllib.parse import urlparse as compat_urllib_parse_urlparse
-except ImportError: # Python 2
+except ImportError: # Python 2
from urlparse import urlparse as compat_urllib_parse_urlparse
try:
import urllib.parse as compat_urlparse
-except ImportError: # Python 2
+except ImportError: # Python 2
import urlparse as compat_urlparse
try:
import http.cookiejar as compat_cookiejar
-except ImportError: # Python 2
+except ImportError: # Python 2
import cookielib as compat_cookiejar
try:
import html.entities as compat_html_entities
-except ImportError: # Python 2
+except ImportError: # Python 2
import htmlentitydefs as compat_html_entities
try:
import html.parser as compat_html_parser
-except ImportError: # Python 2
+except ImportError: # Python 2
import HTMLParser as compat_html_parser
try:
import http.client as compat_http_client
-except ImportError: # Python 2
+except ImportError: # Python 2
import httplib as compat_http_client
try:
try:
from urllib.parse import parse_qs as compat_parse_qs
-except ImportError: # Python 2
+except ImportError: # Python 2
# HACK: The following is the correct parse_qs implementation from cpython 3's stdlib.
# Python 2's version is apparently totally broken
return parsed_result
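+
+# Either branch gives standard parse_qs semantics, e.g.:
+#   compat_parse_qs('v=abc&t=1m') == {'v': ['abc'], 't': ['1m']}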
try:
- compat_str = unicode # Python 2
+ compat_str = unicode # Python 2
except NameError:
compat_str = str
try:
- compat_chr = unichr # Python 2
+ compat_chr = unichr # Python 2
except NameError:
compat_chr = chr
def compat_ord(c):
- if type(c) is int: return c
- else: return ord(c)
+ if type(c) is int:
+ return c
+ else:
+ return ord(c)
if sys.version_info >= (3, 0):
drive = ''
userhome = os.path.join(drive, compat_getenv('HOMEPATH'))
- if i != 1: #~user
+ if i != 1: # ~user
userhome = os.path.join(os.path.dirname(userhome), path[1:i])
return userhome + path[i:]
if total is None:
return None
dif = now - start
- if current == 0 or dif < 0.001: # One millisecond
+ if current == 0 or dif < 0.001: # One millisecond
return None
rate = float(current) / dif
return int((float(total) - float(current)) / rate)
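+
+ # Worked example: 50 of 150 bytes after dif = 10 s gives rate = 5.0 B/s,
+ # so calc_eta returns int((150 - 50) / 5.0) = 20 seconds.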
@staticmethod
def calc_speed(start, now, bytes):
dif = now - start
- if bytes == 0 or dif < 0.001: # One millisecond
+ if bytes == 0 or dif < 0.001: # One millisecond
return None
return float(bytes) / dif
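+ # e.g. calc_speed: 1048576 bytes over dif = 2 s -> 524288.0 bytes/s.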
@staticmethod
def best_block_size(elapsed_time, bytes):
new_min = max(bytes / 2.0, 1.0)
- new_max = min(max(bytes * 2.0, 1.0), 4194304) # Do not surpass 4 MB
+ new_max = min(max(bytes * 2.0, 1.0), 4194304) # Do not surpass 4 MB
if elapsed_time < 0.001:
return int(new_max)
rate = bytes / elapsed_time
})
self.try_rename(tmpfilename, filename)
return True
-
while (retval == RD_INCOMPLETE or retval == RD_FAILED) and not test and not live:
prevsize = os.path.getsize(encodeFilename(tmpfilename))
self.to_screen('[rtmpdump] %s bytes' % prevsize)
- time.sleep(5.0) # This seems to be needed
+ time.sleep(5.0) # This seems to be needed
retval = run_rtmpdump(basic_args + ['-e'] + [[], ['-k', '1']][retval == RD_FAILED])
cursize = os.path.getsize(encodeFilename(tmpfilename))
if prevsize == cursize and retval == RD_FAILED:
from .common import InfoExtractor
+
class AdultSwimIE(InfoExtractor):
_VALID_URL = r'https?://video\.adultswim\.com/(?P<path>.+?)(?:\.html)?(?:\?.*)?(?:#.*)?$'
_TEST = {
-#coding: utf-8
+# coding: utf-8
from __future__ import unicode_literals
uploader_id = mobj.group('company')
playlist_url = compat_urlparse.urljoin(url, 'includes/playlists/itunes.inc')
+
def fix_html(s):
s = re.sub(r'(?s)<script[^<]*?>.*?</script>', '', s)
s = re.sub(r'<img ([^<]*?)>', r'<img \1/>', s)
# The ' in the onClick attributes is not escaped, so the page couldn't be parsed,
# like: http://trailers.apple.com/trailers/wb/gravity/
+
def _clean_json(m):
return 'iTunes.playURL(%s);' % m.group(1).replace('\'', ''')
s = re.sub(self._JSON_RE, _clean_json, s)
'upload_date': upload_date,
'thumbnail': thumbnail,
}
-
qualities,
)
-# There are different sources of video in arte.tv, the extraction process
+# There are different sources of video in arte.tv, the extraction process
# is different for each one. The videos usually expire in 7 days, so we can't
# add tests.
_VALID_URL = r'https?://(?:www\.)?audiomack\.com/song/(?P<id>[\w/-]+)'
IE_NAME = 'audiomack'
_TESTS = [
- #hosted on audiomack
+ # hosted on audiomack
{
'url': 'http://www.audiomack.com/song/roosh-williams/extraordinary',
'info_dict':
{
- 'id' : 'roosh-williams/extraordinary',
+ 'id': 'roosh-williams/extraordinary',
'ext': 'mp3',
'title': 'Roosh Williams - Extraordinary'
}
},
- #hosted on soundcloud via audiomack
+ # hosted on soundcloud via audiomack
{
'url': 'http://www.audiomack.com/song/xclusiveszone/take-kare',
'file': '172419696.mp3',
raise ExtractorError("Unable to deduce api url of song")
realurl = api_response["url"]
- #Audiomack wraps a lot of soundcloud tracks in their branded wrapper
+ # Audiomack wraps a lot of soundcloud tracks in their branded wrapper
# - if so, pass the work off to the soundcloud extractor
if SoundcloudIE.suitable(realurl):
return {'_type': 'url', 'url': realurl, 'ie_key': 'Soundcloud'}
_TEST = {
'url': 'http://bambuser.com/v/4050584',
# MD5 seems to be flaky, see https://travis-ci.org/rg3/youtube-dl/jobs/14051016#L388
- #u'md5': 'fba8f7693e48fd4e8641b3fd5539a641',
+ # u'md5': 'fba8f7693e48fd4e8641b3fd5539a641',
'info_dict': {
'id': '4050584',
'ext': 'flv',
initial_url = mp3_info['url']
re_url = r'(?P<server>http://(.*?)\.bandcamp\.com)/download/track\?enc=mp3-320&fsig=(?P<fsig>.*?)&id=(?P<id>.*?)&ts=(?P<ts>.*)$'
m_url = re.match(re_url, initial_url)
- #We build the url we will use to get the final track url
+ # We build the url we will use to get the final track url
# This url is built by Bandcamp in the script download_bunde_*.js
request_url = '%s/statdownload/track?enc=mp3-320&fsig=%s&id=%s&ts=%s&.rand=665028774616&.vrs=1' % (m_url.group('server'), m_url.group('fsig'), video_id, m_url.group('ts'))
final_url_webpage = self._download_webpage(request_url, video_id, 'Requesting download url')
# If we could correctly generate the .rand field, the url would be
- #in the "download_url" key
+ # in the "download_url" key
final_url = re.search(r'"retry_url":"(.*?)"', final_url_webpage).group(1)
return {
'duration': duration,
'formats': formats,
'subtitles': subtitles,
- }
\ No newline at end of file
+ }
title = self._html_search_regex(
r'<title>([^<]+)\s*-\s*beeg\.?</title>', webpage, 'title')
-
+
description = self._html_search_regex(
r'<meta name="description" content="([^"]*)"',
webpage, 'description', fatal=False)
-#coding: utf-8
+# coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
'like_count': int(infos.find('NB_LIKES').text),
'comment_count': int(infos.find('NB_COMMENTS').text),
'formats': formats,
- }
\ No newline at end of file
+ }
'thumbnail': thumbnail,
'duration': duration,
'formats': formats,
- }
\ No newline at end of file
+ }
req.add_header('Referer', url)
playlist = self._download_xml(req, video_id)
-
+
formats = []
for i in playlist.find('smilRoot/body'):
if 'AD' not in i.attrib['id']:
from .common import InfoExtractor
from ..utils import ExtractorError
+
class Channel9IE(InfoExtractor):
'''
Common extractor for channel9.msdn.com.
'session_code': 'KOS002',
'session_day': 'Day 1',
'session_room': 'Arena 1A',
- 'session_speakers': [ 'Ed Blankenship', 'Andrew Coates', 'Brady Gaster', 'Patrick Klug', 'Mads Kristensen' ],
+ 'session_speakers': ['Ed Blankenship', 'Andrew Coates', 'Brady Gaster', 'Patrick Klug', 'Mads Kristensen'],
},
},
{
'description': 'md5:d1e6ecaafa7fb52a2cacdf9599829f5b',
'duration': 1540,
'thumbnail': 'http://video.ch9.ms/ch9/87e1/0300391f-a455-4c72-bec3-4422f19287e1/selfservicenuk_512.jpg',
- 'authors': [ 'Mike Wilmot' ],
+ 'authors': ['Mike Wilmot'],
},
}
]
'format_id': x.group('quality'),
'format_note': x.group('note'),
'format': '%s (%s)' % (x.group('quality'), x.group('note')),
- 'filesize': self._restore_bytes(x.group('filesize')), # File size is approximate
+ 'filesize': self._restore_bytes(x.group('filesize')), # File size is approximate
'preference': self._known_formats.index(x.group('quality')),
'vcodec': 'none' if x.group('note') == 'Audio only' else None,
} for x in list(re.finditer(FORMAT_REGEX, html)) if x.group('quality') in self._known_formats]
if slides is not None:
d = common.copy()
- d.update({ 'title': title + '-Slides', 'url': slides })
+ d.update({'title': title + '-Slides', 'url': slides})
result.append(d)
if zip_ is not None:
d = common.copy()
- d.update({ 'title': title + '-Zip', 'url': zip_ })
+ d.update({'title': title + '-Zip', 'url': zip_})
result.append(d)
if len(formats) > 0:
d = common.copy()
- d.update({ 'title': title, 'formats': formats })
+ d.update({'title': title, 'formats': formats})
result.append(d)
return result
else:
raise ExtractorError('Unexpected WT.entryid %s' % page_type, expected=True)
- else: # Assuming list
+ else: # Assuming list
return self._extract_list(content_path)
transform_source=fix_xml_ampersands)
track_doc = pdoc.find('trackList/track')
+
def find_param(name):
node = find_xpath_attr(track_doc, './/param', 'name', name)
if node is not None:
"""Report attempt to log in."""
self.to_screen('Logging in')
- #Methods for following #608
+ # Methods for following #608
@staticmethod
def url_result(url, ie=None, video_id=None):
"""Returns a url that points to a page that should be processed"""
- #TODO: ie should be the class used for getting the info
+ # TODO: ie should be the class used for getting the info
video_info = {'_type': 'url',
'url': url,
'ie_key': ie}
if video_id is not None:
video_info['id'] = video_id
return video_info
+
@staticmethod
def playlist_result(entries, playlist_id=None, playlist_title=None):
"""Returns a playlist"""
raise netrc.NetrcParseError('No authenticators for %s' % self._NETRC_MACHINE)
except (IOError, netrc.NetrcParseError) as err:
self._downloader.report_warning('parsing .netrc: %s' % compat_str(err))
-
+
return (username, password)
def _get_tfa_info(self):
return {
'id': video_id,
- 'url':video_url,
+ 'url': video_url,
'title': title,
'description': description,
'timestamp': timestamp,
'comment_count': comment_count,
'height': height,
'width': width,
- }
\ No newline at end of file
+ }
login_request.add_header('Content-Type', 'application/x-www-form-urlencoded')
self._download_webpage(login_request, None, False, 'Wrong login info')
-
def _real_initialize(self):
self._login()
-
def _decrypt_subtitles(self, data, iv, id):
data = bytes_to_intlist(data)
iv = bytes_to_intlist(iv)
return shaHash + [0] * 12
key = obfuscate_key(id)
+
class Counter:
__value = iv
+
def next_value(self):
temp = self.__value
self.__value = inc(self.__value)
return output
- def _real_extract(self,url):
+ def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('video_id')
-#coding: utf-8
+# coding: utf-8
from __future__ import unicode_literals
import re
unescapeHTML,
)
+
class DailymotionBaseInfoExtractor(InfoExtractor):
@staticmethod
def _build_request(url):
request.add_header('Cookie', 'ff=off')
return request
+
class DailymotionIE(DailymotionBaseInfoExtractor, SubtitlesInfoExtractor):
"""Information Extractor for Dailymotion"""
video_id = self._search_regex(
r"flashvars.pvg_id=\"(\d+)\";",
webpage, 'ID')
-
+
json_url = ('http://static.videos.gouv.fr/brightcovehub/export/json/'
+ video_id)
info = self._download_webpage(json_url, title,
'Downloading JSON config')
video_url = json.loads(info)['renditions'][0]['url']
-
+
return {'id': video_id,
'ext': 'mp4',
'url': video_url,
video_id = mobj.group('id')
info_url = "https://dotsub.com/api/media/%s/metadata" % video_id
info = self._download_json(info_url, video_id)
- date = time.gmtime(info['dateCreated']/1000) # The timestamp is in miliseconds
+ date = time.gmtime(info['dateCreated']/1000)  # The timestamp is in milliseconds
return {
'id': video_id,
info_url = (
"http://video.fc2.com/ginfo.php?mimi={1:s}&href={2:s}&v={0:s}&fversion=WIN%2011%2C6%2C602%2C180&from=2&otag=0&upid={0:s}&tk=null&".
- format(video_id, mimi, compat_urllib_request.quote(refer, safe='').replace('.','%2E')))
+ format(video_id, mimi, compat_urllib_request.quote(refer, safe='').replace('.', '%2E')))
info_webpage = self._download_webpage(
info_url, video_id, note='Downloading info page')
'duration': int_or_none(duration),
'like_count': int_or_none(like_count),
'dislike_count': int_or_none(dislike_count),
- }
\ No newline at end of file
+ }
'info_dict': {
'id': '5645318632',
'ext': 'mp4',
- "description": "Waterfalls in the Springtime at Dark Hollow Waterfalls. These are located just off of Skyline Drive in Virginia. They are only about 6/10 of a mile hike but it is a pretty steep hill and a good climb back up.",
- "uploader_id": "forestwander-nature-pictures",
+ "description": "Waterfalls in the Springtime at Dark Hollow Waterfalls. These are located just off of Skyline Drive in Virginia. They are only about 6/10 of a mile hike but it is a pretty steep hill and a good climb back up.",
+ "uploader_id": "forestwander-nature-pictures",
"title": "Dark Hollow Waterfalls"
}
}
'duration': duration,
'age_limit': 18,
'webpage_url': webpage_url,
- }
\ No newline at end of file
+ }
'title': video_title,
'id': video_id,
}
-
+
match = re.search(r'(?:id=["\']wistia_|data-wistia-?id=["\']|Wistia\.embed\(["\'])(?P<id>[^"\']+)', webpage)
if match:
return {
'_type': 'playlist',
'entries': entries,
}
-
'uploader_id': uploader_id,
'like_count': like_count,
'formats': formats
- }
\ No newline at end of file
+ }
(?:id="[^"]+"\s+)?
value="([^"]*)"
''', webpage))
-
+
if fields['op'] == 'download1':
post = compat_urllib_parse.urlencode(fields)
webpage2 = self._download_webpage(redirect_url, video_id)
video_url = self._html_search_regex(
r'flvMask:(.*?);', webpage2, 'video_url')
-
+
duration = parse_duration(self._search_regex(
r'<strong>Runtime:</strong>\s*([0-9:]+)</div>',
webpage, 'duration', fatal=False))
'info_dict': {
'id': '390161',
'ext': 'mp4',
- 'description': 'The square knot, also known as the reef knot, is one of the oldest, most basic knots to tie, and can be used in many different ways. Here\'s the proper way to tie a square knot.',
+ 'description': 'The square knot, also known as the reef knot, is one of the oldest, most basic knots to tie, and can be used in many different ways. Here\'s the proper way to tie a square knot.',
'title': 'How to Tie a Square Knot Properly',
}
}
},
'playlist_count': 7,
}
-
+
def _real_extract(self, url):
list_id = self._match_id(url)
webpage = self._download_webpage(url, list_id)
def _clean_query(query):
NEEDED_ARGS = ['publishedid', 'customerid']
query_dic = compat_urlparse.parse_qs(query)
- cleaned_dic = dict((k,v[0]) for (k,v) in query_dic.items() if k in NEEDED_ARGS)
+ cleaned_dic = dict((k, v[0]) for (k, v) in query_dic.items() if k in NEEDED_ARGS)
# Other player ids return m3u8 urls
cleaned_dic['playerid'] = '247'
cleaned_dic['videokbrate'] = '100000'
compilation = result['compilation']
title = result['title']
- title = '%s - %s' % (compilation, title) if compilation is not None else title
+ title = '%s - %s' % (compilation, title) if compilation is not None else title
previews = result['preview']
previews.sort(key=lambda fmt: self._known_thumbnails.index(fmt['content_format']))
compilation_id = mobj.group('compilationid')
season_id = mobj.group('seasonid')
- if season_id is not None: # Season link
+ if season_id is not None: # Season link
season_page = self._download_webpage(url, compilation_id, 'Downloading season %s web page' % season_id)
playlist_id = '%s/season%s' % (compilation_id, season_id)
playlist_title = self._html_search_meta('title', season_page, 'title')
entries = self._extract_entries(season_page, compilation_id)
- else: # Compilation link
+ else: # Compilation link
compilation_page = self._download_webpage(url, compilation_id, 'Downloading compilation web page')
playlist_id = compilation_id
playlist_title = self._html_search_meta('title', compilation_page, 'title')
seasons = re.findall(r'<a href="/watch/%s/season(\d+)">[^<]+</a>' % compilation_id, compilation_page)
- if len(seasons) == 0: # No seasons in this compilation
+ if len(seasons) == 0: # No seasons in this compilation
entries = self._extract_entries(compilation_page, compilation_id)
else:
entries = []
compilation_id, 'Downloading season %s web page' % season_id)
entries.extend(self._extract_entries(season_page, compilation_id))
- return self.playlist_result(entries, playlist_id, playlist_title)
\ No newline at end of file
+ return self.playlist_result(entries, playlist_id, playlist_title)
'title': title,
'description': description,
}
-
xml_link = self._html_search_regex(
r'<param name="flashvars" value="config=(.*?)" />',
webpage, 'config URL')
-
+
video_id = self._search_regex(
r'http://www\.jeuxvideo\.com/config/\w+/\d+/(.*?)/\d+_player\.xml',
xml_link, 'video ID')
xml_link, title, 'Downloading XML config')
info_json = config.find('format.json').text
info = json.loads(info_json)['versions'][0]
-
+
video_url = 'http://video720.jeuxvideo.com/' + info['file']
return {
class KankanIE(InfoExtractor):
_VALID_URL = r'https?://(?:.*?\.)?kankan\.com/.+?/(?P<id>\d+)\.shtml'
-
+
_TEST = {
'url': 'http://yinyue.kankan.com/vod/48/48863.shtml',
'file': '48863.flv',
'duration': duration,
'view_count': int_or_none(view_count),
'comment_count': int_or_none(comment_count),
- }
\ No newline at end of file
+ }
'title': title,
'url': downloadUrl
}
-
'categories': categories,
'ext': 'mp4',
}
-
r'<div class=\'comments\'>\s*<span class=\'counter\'>(\d+)</span>', webpage, 'comment count', fatal=False)
upload_date = self._html_search_regex(
- r'<time datetime=\'([^\']+)\'>', webpage, 'upload date',fatal=False)
+ r'<time datetime=\'([^\']+)\'>', webpage, 'upload date', fatal=False)
if upload_date is not None:
upload_date = unified_strdate(upload_date)
if len(videos) == 1:
return make_entry(video_id, videos[0])
else:
- return [make_entry(video_id, media, video_number+1) for video_number, media in enumerate(videos)]
\ No newline at end of file
+ return [make_entry(video_id, media, video_number+1) for video_number, media in enumerate(videos)]
'password': password,
'remember': 'false',
'stayPut': 'false'
- }
+ }
request = compat_urllib_request.Request(self._LOGIN_URL, compat_urllib_parse.urlencode(login_form))
login_page = self._download_webpage(request, None, 'Logging in as %s' % username)
m = re.search(r'loginResultJson = \'(?P<json>[^\']+)\';', login_page)
if m is not None:
response = m.group('json')
- response_json = json.loads(response)
+ response_json = json.loads(response)
state = response_json['state']
if state == 'notlogged':
mobj = re.match(self._VALID_URL, url)
course_path = mobj.group('coursepath')
course_id = mobj.group('courseid')
-
+
page = self._download_webpage('http://www.lynda.com/ajax/player?courseId=%s&type=course' % course_id,
course_id, 'Downloading course JSON')
course_json = json.loads(page)
course_title = course_json['Title']
- return self.playlist_result(entries, course_id, course_title)
\ No newline at end of file
+ return self.playlist_result(entries, course_id, course_title)
'duration': duration,
'view_count': view_count,
'formats': formats,
- }
\ No newline at end of file
+ }
compat_urllib_parse,
)
+
class MalemotionIE(InfoExtractor):
_VALID_URL = r'^(?:https?://)?malemotion\.com/video/(.+?)\.(?P<id>.+?)(#|$)'
_TEST = {
class MDRIE(InfoExtractor):
_VALID_URL = r'^(?P<domain>https?://(?:www\.)?mdr\.de)/(?:.*)/(?P<type>video|audio)(?P<video_id>[^/_]+)(?:_|\.html)'
-
+
# No tests, MDR regularly deletes its videos
_TEST = {
'url': 'http://www.mdr.de/fakt/video189002.html',
'title': title,
'thumbnail': thumbnail,
'duration': duration,
- }
\ No newline at end of file
+ }
title = os.path.splitext(data['fname'])[0]
- #Could be several links with different quality
+ # Could be several links with different quality
links = re.findall(r'"file" : "?(.+?)",', webpage)
# Assume the links are ordered in quality
formats = [{
'thumbnail': thumbnail,
'duration': duration,
'formats': formats,
- }
\ No newline at end of file
+ }
like_count = str_to_int(self._html_search_regex(
r'<strong>Favorited</strong>\s+([^<]+)<',
webpage, 'like count', fatal=False))
-
+
upload_date = self._html_search_regex(
r'<strong>Uploaded</strong>\s+([^<]+)<', webpage, 'upload date')
if 'Ago' in upload_date:
webpage = self._download_webpage(url, video_id)
jsplayer = self._download_webpage('http://www.moviezine.se/api/player.js?video=%s' % video_id, video_id, 'Downloading js api player')
- formats =[{
+ formats = [{
'format_id': 'sd',
'url': self._html_search_regex(r'file: "(.+?)",', jsplayer, 'file'),
'quality': 0,
'title': 'dissapeared image',
'description': 'optical illusion dissapeared image magic illusion',
}
- }
\ No newline at end of file
+ }
r'_([0-9]+)\.[a-zA-Z0-9]+$', src['src'],
False, default=None)
vcodec = src['type'].partition('/')[2]
-
+
formats.append({
'format_id': encoding_id + '-' + vcodec,
'url': src['src'],
url = response.geturl()
# Transform the url to get the best quality:
url = re.sub(r'.+pxE=mp4', 'http://mtvnmobile.vo.llnwd.net/kip0/_pxn=0+_pxK=18639+_pxE=mp4', url, 1)
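+ # e.g. (hypothetical input) 'http://cdn.example/a+_pxE=mp4/video.mp4'
+ # -> 'http://mtvnmobile.vo.llnwd.net/kip0/_pxn=0+_pxK=18639+_pxE=mp4/video.mp4'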
- return [{'url': url,'ext': 'mp4'}]
+ return [{'url': url, 'ext': 'mp4'}]
def _extract_video_formats(self, mdoc, mtvn_id):
if re.match(r'.*/(error_country_block\.swf|geoblock\.mp4)$', mdoc.find('.//src').text) is not None:
uri = mobj.groupdict().get('mgid')
if uri is None:
webpage = self._download_webpage(url, video_id)
-
+
# Some videos come from Vevo.com
m_vevo = re.search(r'isVevoVideo = true;.*?vevoVideoId = "(.*?)";',
webpage, re.DOTALL)
- vevo_id = m_vevo.group(1);
+ vevo_id = m_vevo.group(1)
self.to_screen('Vevo video detected: %s' % vevo_id)
return self.url_result('vevo:%s' % vevo_id, ie='Vevo')
-
+
uri = self._html_search_regex(r'/uri/(.*?)\?', webpage, 'uri')
return self._get_videos_info(uri)
'is_live': True,
'thumbnail': thumbnail,
}
-
'duration': int_or_none(duration),
'view_count': int_or_none(view_count),
'formats': formats,
- }
\ No newline at end of file
+ }
player_info_page = self._download_webpage('http://player.muzu.tv/player/playerInit?ai=%s' % video_id,
video_id, u'Downloading player info')
video_info = json.loads(player_info_page)['videos'][0]
- for quality in ['1080' , '720', '480', '360']:
+ for quality in ['1080', '720', '480', '360']:
if video_info.get('v%s' % quality):
break
# Original Code from: https://github.com/dersphere/plugin.video.myvideo_de.git
# Released into the Public Domain by Tristan Fischer on 2013-05-19
# https://github.com/rg3/youtube-dl/pull/842
- def __rc4crypt(self,data, key):
+ def __rc4crypt(self, data, key):
x = 0
box = list(range(256))
for i in list(range(256)):
out += chr(compat_ord(char) ^ box[(box[x] + box[y]) % 256])
return out
- def __md5(self,s):
+ def __md5(self, s):
return hashlib.md5(s).hexdigest().encode()
- def _real_extract(self,url):
+ def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('id')
'play_path': video_playpath,
'player_url': video_swfobj,
}
-
raise ExtractorError('couldn\'t extract vid and key')
vid = m_id.group(1)
key = m_id.group(2)
- query = compat_urllib_parse.urlencode({'vid': vid, 'inKey': key,})
+ query = compat_urllib_parse.urlencode({'vid': vid, 'inKey': key})
query_urls = compat_urllib_parse.urlencode({
'masterVid': vid,
'protocol': 'p2p',
if domain.startswith('rtmp'):
f.update({
'ext': 'flv',
- 'rtmp_protocol': '1', # rtmpt
+ 'rtmp_protocol': '1', # rtmpt
})
formats.append(f)
self._sort_formats(formats)
duration = parse_duration(
self._html_search_meta('duration', webpage, 'duration', fatal=False))
-
return {
'id': shortened_video_id,
'url': video_url,
'thumbnail': thumbnail,
'duration': duration,
'formats': formats,
- }
\ No newline at end of file
+ }
mobj = re.match(self._VALID_URL, url)
music_id = mobj.group('id')
webpage = self._download_webpage(url, music_id)
-
+
title = self._html_search_regex(
r',"name":"([^"]+)",', webpage, 'music title')
uploader = self._html_search_regex(
r',"artist":"([^"]+)",', webpage, 'music uploader')
-
+
music_url_json_string = self._html_search_regex(
r'({"url":"[^"]+"),', webpage, 'music url') + '}'
music_url_json = json.loads(music_url_json_string)
'thumbnail': thumbnail,
'duration': duration,
'formats': formats,
- }
\ No newline at end of file
+ }
'uploader': uploader,
'uploader_id': uploader_id,
'formats': formats,
- }
\ No newline at end of file
+ }
path_url, video_id, 'Downloading final video url')
video_url = path_doc.find('path').text
else:
- video_url = initial_video_url
+ video_url = initial_video_url
join = compat_urlparse.urljoin
return {
'uploader_id': uploader_id,
'duration': duration,
'formats': formats,
- }
\ No newline at end of file
+ }
'url': video_url,
'title': title,
'description': description
- }
\ No newline at end of file
+ }
'title': 'youtubedl test video _BaW_jenozKc.mp4',
'description': 'Description',
}
- }
\ No newline at end of file
+ }
'duration': duration,
'view_count': view_count,
'formats': formats,
- }
\ No newline at end of file
+ }
'upload_date': upload_date,
'age_limit': 18,
'formats': formats,
- }
\ No newline at end of file
+ }
'duration': duration,
'formats': formats,
'thumbnails': thumbnails,
- }
\ No newline at end of file
+ }
}
else:
return self._extract_result(videos_info[0], videos_more_info)
-
'title': data['title'],
'description': data['subtitle'],
'entries': entries
- }
\ No newline at end of file
+ }
from .common import InfoExtractor
from ..utils import int_or_none
+
class PodomaticIE(InfoExtractor):
IE_NAME = 'podomatic'
_VALID_URL = r'^(?P<proto>https?)://(?P<channel>[^.]+)\.podomatic\.com/entry/(?P<id>[^?]+)'
comment_count = self._extract_count(
r'All comments \(<var class="videoCommentCount">([\d,\.]+)</var>', webpage, 'comment')
- video_urls = list(map(compat_urllib_parse.unquote , re.findall(r'"quality_[0-9]{3}p":"([^"]+)', webpage)))
+ video_urls = list(map(compat_urllib_parse.unquote, re.findall(r'"quality_[0-9]{3}p":"([^"]+)', webpage)))
if webpage.find('"encrypted":true') != -1:
password = compat_urllib_parse.unquote_plus(self._html_search_regex(r'"video_title":"([^"]+)', webpage, 'password'))
video_urls = list(map(lambda s: aes_decrypt_text(s, password, 32).decode('utf-8'), video_urls))
video_url = self._search_regex(VIDEO_URL_RE, webpage, 'video url')
video_url = compat_urllib_parse.unquote(video_url)
- #Get the uploaded date
+ # Get the uploaded date
VIDEO_UPLOADED_RE = r'<div class="video_added_by">Added (?P<date>[0-9\/]+) by'
upload_date = self._html_search_regex(VIDEO_UPLOADED_RE, webpage, 'upload date', fatal=False)
if upload_date:
'upload_date': upload_date,
'duration': duration,
'formats': formats,
- }
\ No newline at end of file
+ }
if captions.endswith(STL_EXT):
captions = captions[:-len(STL_EXT)] + SRT_EXT
subtitles['it'] = 'http://www.rai.tv%s' % compat_urllib_parse.quote(captions)
- return subtitles
\ No newline at end of file
+ return subtitles
'thumbnail': thumbnail_url,
'description': description,
}
-
playerdata = self._download_xml(playerdata_url, video_id, 'Downloading player data XML')
videoinfo = playerdata.find('./playlist/videoinfo')
-
+
formats = []
for filename in videoinfo.findall('filename'):
mobj = re.search(r'(?P<url>rtmpe://(?:[^/]+/){2})(?P<play_path>.+)', filename.text)
'upload_date': upload_date,
'duration': duration,
'formats': formats,
- }
\ No newline at end of file
+ }
return url
-
class RTVEALaCartaIE(InfoExtractor):
IE_NAME = 'rtve.es:alacarta'
IE_DESC = 'RTVE a la carta'
'view_count': view_count,
'duration': duration,
'formats': formats,
- }
\ No newline at end of file
+ }
'description': description,
'thumbnail': 'http://www.scivee.tv/assets/videothumb/%s' % video_id,
'formats': formats,
- }
\ No newline at end of file
+ }
'title': title,
'entries': entries,
}
-
-
\ No newline at end of file
'filesize': filesize,
'title': title,
'thumbnail': thumbnail,
- }
\ No newline at end of file
+ }
},
'playlist_mincount': 4,
}
-
+
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
community_id = mobj.group('communityid')
full_title = resolve_title = '%s/%s' % (uploader, slug_title)
if token:
resolve_title += '/%s' % token
-
+
self.report_resolve(full_title)
-
+
url = 'http://soundcloud.com/%s' % resolve_title
info_json_url = self._resolv_url(url)
info = self._download_json(info_json_url, full_title, 'Downloading info JSON')
'description': description,
'duration': duration,
'thumbnails': thumbnails
- }
\ No newline at end of file
+ }
'duration': duration,
'categories': categories,
'formats': formats,
- }
\ No newline at end of file
+ }
'rtmp_live': asset.get('live'),
'timestamp': parse_iso8601(asset.get('date')),
}
-
sub_lang_list = {}
for sub_lang in requested_langs:
- if not sub_lang in available_subs_list:
+ if sub_lang not in available_subs_list:
self._downloader.report_warning(u'no closed captions found in the specified language "%s"' % sub_lang)
continue
sub_lang_list[sub_lang] = available_subs_list[sub_lang]
urls = []
webpage = self._download_webpage(url, user_id)
urls.extend(re.findall(self._MEDIA_RE, webpage))
-
+
pages = re.findall(r'/ajax-user/user-videos/%s\?page=([0-9]+)' % user_id, webpage)[:-1]
for p in pages:
more = 'http://www.teachertube.com/ajax-user/user-videos/%s?page=%s' % (user_id, p)
display_id = mobj.group('display_id')
webpage = self._download_webpage(url, display_id)
-
+
video_id = mobj.group("video_id")
if not video_id:
video_id = self._html_search_regex(
-#coding: utf-8
+# coding: utf-8
from __future__ import unicode_literals
from .mitele import MiTeleIE
'skip_download': True,
},
}
+
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('id')
if mobj.group('config'):
- config_url = url+ '&form=json'
+ config_url = url + '&form=json'
config_url = config_url.replace('swf/', 'config/')
config_url = config_url.replace('onsite/', 'onsite/config/')
config = self._download_json(config_url, video_id, 'Downloading config')
smil_url = ('http://link.theplatform.com/s/dJ5BDC/{0}/meta.smil?'
'format=smil&mbr=true'.format(video_id))
-
meta = self._download_xml(smil_url, video_id)
try:
error_msg = next(
-#coding: utf-8
+# coding: utf-8
from __future__ import unicode_literals
import re
r': <a href="http://www.thisav.com/user/[0-9]+/([^"]+)">(?:[^<]+)</a>',
webpage, 'uploader id', fatal=False)
ext = determine_ext(video_url)
-
+
return {
'id': video_id,
'url': video_url,
video_id = mobj.group('id')
webpage = self._download_webpage(url, video_id, 'Downloading page')
-
+
mobj = re.search(r'(?m)fo\.addVariable\("file",\s"(?P<fileid>[\da-z]+)"\);\n'
'\s+fo\.addVariable\("s",\s"(?P<serverid>\d+)"\);', webpage)
if mobj is None:
'url': video_url,
'thumbnail': thumbnail,
'title': title
- }
\ No newline at end of file
+ }
fmt['height'] = int(m.group(1))
formats.append(fmt)
self._sort_formats(formats)
-
+
return {
'id': video_id,
'display_id': display_id,
webpage = self._download_webpage(url, name)
title = self._search_regex(r'<title>(.+?)</title>',
- webpage, 'video title').replace(' - Trailer Addict','')
+ webpage, 'video title').replace(' - Trailer Addict', '')
view_count_str = self._search_regex(
r'<span class="views_n">([0-9,.]+)</span>',
webpage, 'view count', fatal=False)
fvar = "fvar"
info_url = "http://www.traileraddict.com/%s.php?tid=%s" % (fvar, str(video_id))
- info_webpage = self._download_webpage(info_url, video_id , "Downloading the info webpage")
+ info_webpage = self._download_webpage(info_url, video_id, "Downloading the info webpage")
final_url = self._search_regex(r'&fileurl=(.+)',
- info_webpage, 'Download url').replace('%3F','?')
+ info_webpage, 'Download url').replace('%3F', '?')
thumbnail_url = self._search_regex(r'&image=(.+?)&',
info_webpage, 'thumbnail url')
format_url, video_id,
note=u'Downloading formats',
errnote=u'Error while downloading formats')
-
+
video_url_template = (
u'http://fs%(server)s.trilulilu.ro/stream.php?type=video'
u'&source=site&hash=%(hash)s&username=%(userid)s&'
'description': description,
'thumbnail': thumbnail,
}
-
if quality:
info_url += '&hd' + quality
webpage = self._download_webpage(info_url, id, "Opening the info webpage")
- final_url = self._html_search_regex('>(.+?)</f>',webpage, 'video url')
+ final_url = self._html_search_regex('>(.+?)</f>', webpage, 'video url')
return final_url
def _real_extract(self, url):
'duration': duration,
'age_limit': age_limit,
'formats': formats,
- }
\ No newline at end of file
+ }
for asset in response if asset.get('assetType') == 'Video'
]
- return self.playlist_result(entries, course_id, course_title)
\ No newline at end of file
+ return self.playlist_result(entries, course_id, course_title)
if rutv_url:
return self.url_result(rutv_url, 'RUTV')
- raise ExtractorError('No video found', expected=True)
\ No newline at end of file
+ raise ExtractorError('No video found', expected=True)
'duration': float_or_none(data['duration'], 1000),
'view_count': data['displays'],
'formats': formats,
- }
\ No newline at end of file
+ }
except ExtractorError:
raise ExtractorError('The page doesn\'t contain a video', expected=True)
return self.url_result(ooyala_url, ie='Ooyala')
-
'view_count': view_count,
'formats': formats,
'age_limit': 18,
- }
\ No newline at end of file
+ }
determine_ext,
)
+
class VideofyMeIE(InfoExtractor):
_VALID_URL = r'https?://(www\.videofy\.me/.+?|p\.videofy\.me/v)/(?P<id>\d+)(&|#|$)'
IE_NAME = u'videofy.me'
u'uploader': u'VideofyMe',
u'uploader_id': u'thisisvideofyme',
},
-
+
}
def _real_extract(self, url):
video_id)
video = config.find('video')
sources = video.find('sources')
- url_node = next(node for node in [find_xpath_attr(sources, 'source', 'id', 'HQ %s' % key)
+ url_node = next(node for node in [find_xpath_attr(sources, 'source', 'id', 'HQ %s' % key)
for key in ['on', 'av', 'off']] if node is not None)
video_url = url_node.find('url').text
'like_count': int_or_none(video['liked']),
'dislike_count': int_or_none(video['disliked']),
'formats': formats,
- }
\ No newline at end of file
+ }
'title': 'optical illusion dissapeared image magic illusion',
'description': ''
},
- }
\ No newline at end of file
+ }
-#coding: utf-8
+# coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
def _real_extract(self, url):
video_id = self._match_id(url)
-
+
webpage = self._download_webpage(url, video_id)
video_url = self._html_search_regex(
r'{\s*file\s*:\s*"([^"]+)"\s*}', webpage, 'video url')
title = self._html_search_regex(
r'(?s)<h2 class="video-title">(.*?)</h2>', webpage, 'title')
-
+
return {
'id': video_id,
'title': title,
'url': video_url,
}
-
\ No newline at end of file
'timestamp': 1413835980.560,
'upload_date': '20141020',
'duration': 3238,
- }
+ }
},
# cobra.be
{
'timestamp': timestamp,
'duration': duration,
'formats': formats,
- }
\ No newline at end of file
+ }
'upload_date': upload_date,
}
-# TODO test _1
\ No newline at end of file
+# TODO test _1
'title': video_title,
'thumbnail': thumbnail,
}
-
'thumbnail': thumbnail,
'age_limit': 18,
}
-
}
]
- def _real_extract(self,url):
+ def _real_extract(self, url):
def extract_video_url(webpage):
mp4 = re.search(r'<video\s+.*?file="([^"]+)".*?>', webpage)
if mp4 is None:
'title': title,
'formats': self._extract_f4m_formats(f4m_url, video_id),
'thumbnail': self._og_search_thumbnail(webpage),
- }
\ No newline at end of file
+ }
def _gen_sid(self):
nowTime = int(time.time() * 1000)
- random1 = random.randint(1000,1998)
- random2 = random.randint(1000,9999)
+ random1 = random.randint(1000, 1998)
+ random2 = random.randint(1000, 9999)
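+        # sid layout: millisecond timestamp followed by the two random parts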
- return "%d%d%d" %(nowTime,random1,random2)
+ return "%d%d%d" % (nowTime, random1, random2)
def _get_file_ID_mix_string(self, seed):
mixed = []
index = math.floor(seed / 65536 * len(source))
mixed.append(source[int(index)])
source.remove(source[int(index)])
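+            # each pass consumes one character of the source alphabet, so a
+            # given seed always yields the same permutation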
- #return ''.join(mixed)
+ # return ''.join(mixed)
return mixed
def _get_file_id(self, fileId, seed):
keys = [s['k'] for s in config['data'][0]['segs'][format]]
# segs is usually a dictionary, but an empty *list* if an error occurred.
- files_info=[]
+ files_info = []
sid = self._gen_sid()
fileid = self._get_file_id(fileid, seed)
- #column 8,9 of fileid represent the segment number
- #fileid[7:9] should be changed
+ # characters 8-9 of the fileid encode the segment number,
+ # so fileid[8:10] is replaced with the segment index in two-digit hex
for index, key in enumerate(keys):
temp_fileid = '%s%02X%s' % (fileid[0:8], index, fileid[10:])
download_url = 'http://k.youku.com/player/getFlvPath/sid/%s_%02X/st/flv/fileid/%s?k=%s' % (sid, index, temp_fileid, key)
for encrypted_link in encrypted_links:
link = aes_decrypt_text(encrypted_link, video_title, 32).decode('utf-8')
links.append(link)
-
+
formats = []
for link in links:
# A link looks like this:
if not formats:
raise ExtractorError(u'ERROR: no known formats available for video')
-
+
return {
'id': video_id,
'uploader': video_uploader,
uppercase_escape,
)
+
class YoutubeBaseInfoExtractor(InfoExtractor):
"""Provide base functions for Youtube extractors"""
_LOGIN_URL = 'https://accounts.google.com/ServiceLogin'
# Convert to UTF-8 *before* urlencode because Python 2.x's urlencode
# chokes on unicode
- login_form = dict((k.encode('utf-8'), v.encode('utf-8')) for k,v in login_form_strs.items())
+ login_form = dict((k.encode('utf-8'), v.encode('utf-8')) for k, v in login_form_strs.items())
login_data = compat_urllib_parse.urlencode(login_form).encode('ascii')
req = compat_urllib_request.Request(self._LOGIN_URL, login_data)
'service': 'youtube',
'hl': 'en_US',
}
- tfa_form = dict((k.encode('utf-8'), v.encode('utf-8')) for k,v in tfa_form_strs.items())
+ tfa_form = dict((k.encode('utf-8'), v.encode('utf-8')) for k, v in tfa_form_strs.items())
tfa_data = compat_urllib_parse.urlencode(tfa_form).encode('ascii')
tfa_req = compat_urllib_request.Request(self._TWOFACTOR_URL, tfa_data)
list_url = caption_url + '&' + list_params
caption_list = self._download_xml(list_url, video_id)
original_lang_node = caption_list.find('track')
- if original_lang_node is None or original_lang_node.attrib.get('kind') != 'asr' :
+ if original_lang_node is None or original_lang_node.attrib.get('kind') != 'asr':
self._downloader.report_warning('Video doesn\'t have automatic captions')
return {}
original_lang = original_lang_node.attrib['lang_code']
def _extract_from_m3u8(self, manifest_url, video_id):
url_map = {}
+
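+        # pull the media URLs (the non-comment lines) out of the m3u8 manifest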
def _get_urls(_manifest):
lines = _manifest.split('\n')
urls = filter(lambda l: l and not l.startswith('#'),
# annotations
video_annotations = None
if self._downloader.params.get('writeannotations', False):
- video_annotations = self._extract_annotations(video_id)
+ video_annotations = self._extract_annotations(video_id)
# Decide which formats to download
try:
'player_url': player_url,
}]
elif len(video_info.get('url_encoded_fmt_stream_map', [])) >= 1 or len(video_info.get('adaptive_fmts', [])) >= 1:
- encoded_url_map = video_info.get('url_encoded_fmt_stream_map', [''])[0] + ',' + video_info.get('adaptive_fmts',[''])[0]
+ encoded_url_map = video_info.get('url_encoded_fmt_stream_map', [''])[0] + ',' + video_info.get('adaptive_fmts', [''])[0]
if 'rtmpe%3Dyes' in encoded_url_map:
raise ExtractorError('rtmpe downloads are not supported, see https://github.com/rg3/youtube-dl/issues/343 for more information.', expected=True)
url_map = {}
dash_manifest_url = video_info.get('dashmpd')[0]
else:
dash_manifest_url = ytplayer_config['args']['dashmpd']
+
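+            # presumably used as a re.sub callback: replaces the encrypted
+            # signature embedded in the DASH manifest URL with its decrypted form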
def decrypt_sig(mobj):
s = mobj.group(1)
dec_s = self._decrypt_signature(s, video_id, player_url, age_gate)
'formats': formats,
}
+
class YoutubePlaylistIE(YoutubeBaseInfoExtractor):
IE_DESC = 'YouTube.com playlists'
_VALID_URL = r"""(?x)(?:
)
(
(?:PL|LL|EC|UU|FL|RD)?[0-9A-Za-z-_]{10,}
- # Top tracks, they can also include dots
+ # Top tracks, they can also include dots
|(?:MC)[\w\.]*
)
.*
<span[^>]*>.*?%s.*?</span>''' % re.escape(query),
channel_page, 'list')
url = compat_urlparse.urljoin('https://www.youtube.com/', link)
-
+
video_re = r'data-index="\d+".*?data-video-id="([0-9A-Za-z_-]{11})"'
ids = []
# sometimes the webpage doesn't contain the videos
ids_in_page = self.extract_videos_from_page(page['content_html'])
video_ids.extend(ids_in_page)
-
+
if self._MORE_PAGES_INDICATOR not in page['load_more_widget_html']:
break
# Don't return True if the url can be extracted with another youtube
# extractor; the regex is too permissive and would match it.
other_ies = iter(klass for (name, klass) in globals().items() if name.endswith('IE') and klass is not cls)
- if any(ie.suitable(url) for ie in other_ies): return False
- else: return super(YoutubeUserIE, cls).suitable(url)
+ if any(ie.suitable(url) for ie in other_ies):
+ return False
+ else:
+ return super(YoutubeUserIE, cls).suitable(url)
def _real_extract(self, url):
# Extract username
paging = mobj.group('paging')
return self.playlist_result(feed_entries, playlist_title=self._PLAYLIST_TITLE)
+
class YoutubeRecommendedIE(YoutubeFeedsInfoExtractor):
IE_DESC = 'YouTube.com recommended videos, "ytrec" keyword (requires authentication)'
_VALID_URL = r'https?://www\.youtube\.com/feed/recommended|:ytrec(?:ommended)?'
_FEED_NAME = 'recommended'
_PLAYLIST_TITLE = 'Youtube Recommended videos'
+
class YoutubeWatchLaterIE(YoutubeFeedsInfoExtractor):
IE_DESC = 'Youtube watch later list, "ytwatchlater" keyword (requires authentication)'
_VALID_URL = r'https?://www\.youtube\.com/feed/watch_later|:ytwatchlater'
_PLAYLIST_TITLE = 'Youtube Watch Later'
_PERSONAL_FEED = True
+
class YoutubeHistoryIE(YoutubeFeedsInfoExtractor):
IE_DESC = 'Youtube watch history, "ythistory" keyword (requires authentication)'
_VALID_URL = 'https?://www\.youtube\.com/feed/history|:ythistory'
_PERSONAL_FEED = True
_PLAYLIST_TITLE = 'Youtube Watch History'
+
class YoutubeFavouritesIE(YoutubeBaseInfoExtractor):
IE_NAME = 'youtube:favorites'
IE_DESC = 'YouTube.com favourite videos, "ytfav" keyword (requires authentication)'
postproc.add_option(
'--exec',
metavar='CMD', dest='exec_cmd',
- help='Execute a command on the file after downloading, similar to find\'s -exec syntax. Example: --exec \'adb push {} /sdcard/Music/ && rm {}\'' )
+ help='Execute a command on the file after downloading, similar to find\'s -exec syntax. Example: --exec \'adb push {} /sdcard/Music/ && rm {}\'')
parser.add_option_group(general)
parser.add_option_group(selection)
'Command returned error code %d' % retCode)
return None, information # by default, keep file and do nothing
-
extension = 'wav'
more_opts += ['-f', 'wav']
- prefix, sep, ext = path.rpartition(u'.') # not os.path.splitext, since the latter does not work on unicode in all setups
+ prefix, sep, ext = path.rpartition(u'.') # not os.path.splitext, since the latter does not work on unicode in all setups
new_path = prefix + sep + extension
# If we download foo.mp3 and convert it to... foo.mp3, then don't delete foo.mp3, silly.
self._downloader.to_screen(u'[' + self._executable + '] Destination: ' + new_path)
self.run_ffmpeg(path, new_path, acodec, more_opts)
except:
- etype,e,tb = sys.exc_info()
+ etype, e, tb = sys.exc_info()
if isinstance(e, AudioConversionError):
msg = u'audio conversion failed: ' + e.msg
else:
self._downloader.report_warning(u'Cannot update utime of audio file')
information['filepath'] = new_path
- return self._nopostoverwrites,information
+ return self._nopostoverwrites, information
class FFmpegVideoConvertor(FFmpegPostProcessor):
- def __init__(self, downloader=None,preferedformat=None):
+ def __init__(self, downloader=None, preferedformat=None):
super(FFmpegVideoConvertor, self).__init__(downloader)
- self._preferedformat=preferedformat
+ self._preferedformat = preferedformat
def run(self, information):
path = information['filepath']
outpath = prefix + sep + self._preferedformat
if information['ext'] == self._preferedformat:
self._downloader.to_screen(u'[ffmpeg] Not converting video file %s - already is in target format %s' % (path, self._preferedformat))
- return True,information
- self._downloader.to_screen(u'['+'ffmpeg'+'] Converting video from %s to %s, Destination: ' % (information['ext'], self._preferedformat) +outpath)
+ return True, information
+ self._downloader.to_screen(u'[ffmpeg] Converting video from %s to %s, Destination: ' % (information['ext'], self._preferedformat) + outpath)
self.run_ffmpeg(path, outpath, [])
information['filepath'] = outpath
information['format'] = self._preferedformat
information['ext'] = self._preferedformat
- return False,information
+ return False, information
class FFmpegEmbedSubtitlePP(FFmpegPostProcessor):
self._downloader.to_screen(u'[ffmpeg] Subtitles can only be embedded in mp4 files')
return True, information
if not information.get('subtitles'):
- self._downloader.to_screen(u'[ffmpeg] There aren\'t any subtitles to embed')
+ self._downloader.to_screen(u'[ffmpeg] There aren\'t any subtitles to embed')
return True, information
sub_langs = [key for key in information['subtitles']]
except (subprocess.CalledProcessError, OSError):
self._downloader.report_error("This filesystem doesn't support extended attributes. (You may have to enable them in your /etc/fstab)")
return False, info
-
avm_class.method_pyfunctions[func_name] = resfunc
return resfunc
-
)
from .version import __version__
+
def rsa_verify(message, signature, key):
from struct import pack
from hashlib import sha256
from sys import version_info
+
def b(x):
- if version_info[0] == 2: return x
- else: return x.encode('latin1')
+ if version_info[0] == 2:
+ return x
+ else:
+ return x.encode('latin1')
assert(type(message) == type(b('')))
block_size = 0
n = key[0]
raw_bytes.insert(0, pack("B", signature & 0xFF))
signature >>= 8
signature = (block_size - len(raw_bytes)) * b('\x00') + b('').join(raw_bytes)
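+    # expected PKCS#1 v1.5 layout: 0x00 0x01 <0xFF padding> 0x00 <DigestInfo> <SHA-256 digest>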
- if signature[0:2] != b('\x00\x01'): return False
+ if signature[0:2] != b('\x00\x01'):
+ return False
signature = signature[2:]
- if not b('\x00') in signature: return False
+ if not b('\x00') in signature:
+ return False
signature = signature[signature.index(b('\x00'))+1:]
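+    # the 19-byte prefix below is the DER-encoded DigestInfo header for SHA-256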
- if not signature.startswith(b('\x30\x31\x30\x0D\x06\x09\x60\x86\x48\x01\x65\x03\x04\x02\x01\x05\x00\x04\x20')): return False
+ if not signature.startswith(b('\x30\x31\x30\x0D\x06\x09\x60\x86\x48\x01\x65\x03\x04\x02\x01\x05\x00\x04\x20')):
+ return False
signature = signature[19:]
- if signature != sha256(message).digest(): return False
+ if signature != sha256(message).digest():
+ return False
return True
try:
newversion = compat_urllib_request.urlopen(VERSION_URL).read().decode('utf-8').strip()
except:
- if verbose: to_screen(compat_str(traceback.format_exc()))
+ if verbose:
+ to_screen(compat_str(traceback.format_exc()))
to_screen(u'ERROR: can\'t find the current version. Please try again later.')
return
if newversion == __version__:
versions_info = compat_urllib_request.urlopen(JSON_URL).read().decode('utf-8')
versions_info = json.loads(versions_info)
except:
- if verbose: to_screen(compat_str(traceback.format_exc()))
+ if verbose:
+ to_screen(compat_str(traceback.format_exc()))
to_screen(u'ERROR: can\'t obtain versions info. Please try again later.')
return
- if not 'signature' in versions_info:
+ if 'signature' not in versions_info:
newcontent = urlh.read()
urlh.close()
except (IOError, OSError):
- if verbose: to_screen(compat_str(traceback.format_exc()))
+ if verbose:
+ to_screen(compat_str(traceback.format_exc()))
to_screen(u'ERROR: unable to download latest version')
return
with open(exe + '.new', 'wb') as outf:
outf.write(newcontent)
except (IOError, OSError):
- if verbose: to_screen(compat_str(traceback.format_exc()))
+ if verbose:
+ to_screen(compat_str(traceback.format_exc()))
to_screen(u'ERROR: unable to write the new version')
return
subprocess.Popen([bat]) # Continues to run in the background
return # Do not show premature success messages
except (IOError, OSError):
- if verbose: to_screen(compat_str(traceback.format_exc()))
+ if verbose:
+ to_screen(compat_str(traceback.format_exc()))
to_screen(u'ERROR: unable to overwrite current version')
return
newcontent = urlh.read()
urlh.close()
except (IOError, OSError):
- if verbose: to_screen(compat_str(traceback.format_exc()))
+ if verbose:
+ to_screen(compat_str(traceback.format_exc()))
to_screen(u'ERROR: unable to download latest version')
return
with open(filename, 'wb') as outf:
outf.write(newcontent)
except (IOError, OSError):
- if verbose: to_screen(compat_str(traceback.format_exc()))
+ if verbose:
+ to_screen(compat_str(traceback.format_exc()))
to_screen(u'ERROR: unable to overwrite current version')
return
to_screen(u'Updated youtube-dl. Restart youtube-dl to use the new version.')
+
def get_notes(versions, fromVersion):
notes = []
- for v,vdata in sorted(versions.items()):
+ for v, vdata in sorted(versions.items()):
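+        # plain string comparison works here: youtube-dl version ids are
+        # date-based (YYYY.MM.DD), so lexicographic order matches release order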
if v > fromVersion:
notes.extend(vdata.get('notes', []))
return notes
+
def print_notes(to_screen, versions, fromVersion=__version__):
notes = get_notes(versions, fromVersion)
if notes:
'Accept-Language': 'en-us,en;q=0.5',
}
+
def preferredencoding():
"""Get preferred encoding.
# On python2.6 the xml.etree.ElementTree.Element methods don't support
# the namespace parameter
+
+
def xpath_with_ns(path, ns_map):
components = [c.split(':') for c in path.split('/')]
replaced = []
timestamp = email.utils.mktime_tz(timetuple)
return timestamp
+
def sanitize_filename(s, restricted=False, is_id=False):
"""Sanitizes a string so it could be used as part of a filename.
If restricted is set, use a stricter subset of allowed characters.
result = '_'
return result
+
def orderedSet(iterable):
""" Remove all duplicates from the input iterable """
res = []
assert isinstance(optval, compat_str)
return optval
+
def formatSeconds(secs):
if secs > 3600:
return '%d:%02d:%02d' % (secs // 3600, (secs % 3600) // 60, secs % 60)
class ExtractorError(Exception):
"""Error during info extraction."""
+
def __init__(self, msg, tb=None, expected=False, cause=None, video_id=None):
""" tb, if given, is the original traceback (so that it can be printed out).
If expected is set, this is a normal error message and most likely not a bug in youtube-dl.
configured to continue on errors. They will contain the appropriate
error message.
"""
+
def __init__(self, msg, exc_info=None):
""" exc_info, if given, is the original exception that caused the trouble (as returned by sys.exc_info()). """
super(DownloadError, self).__init__(msg)
This exception may be raised by PostProcessor's .run() method to
indicate an error in the postprocessing task.
"""
+
def __init__(self, msg):
self.msg = msg
+
class MaxDownloadsReached(Exception):
""" --max-downloads limit has been reached. """
pass
self.downloaded = downloaded
self.expected = expected
+
class YoutubeDLHandler(compat_urllib_request.HTTPHandler):
"""Handler for HTTP requests and responses.
return None
upload_date = None
- #Replace commas
+ # Replace commas so dates like 'May 16, 2014' parse with comma-less formats
date_str = date_str.replace(',', ' ')
# %z (UTC offset) is only supported in python>=3.2
date_str = re.sub(r' ?(\+|-)[0-9]{2}:?[0-9]{2}$', '', date_str)
upload_date = datetime.datetime(*timetuple[:6]).strftime('%Y%m%d')
return upload_date
+
def determine_ext(url, default_ext='unknown_video'):
if url is None:
return default_ext
else:
return default_ext
+
def subtitles_filename(filename, sub_lang, sub_format):
return filename.rsplit('.', 1)[0] + '.' + sub_lang + '.' + sub_format
+
def date_from_str(date_str):
"""
Return a datetime object from a string in the format YYYYMMDD or
if sign == '-':
time = -time
unit = match.group('unit')
- #A bad aproximation?
+ # A bad approximation?
if unit == 'month':
unit = 'day'
time *= 30
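+            # (timedelta has no month unit, so a month is approximated as 30 days)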
delta = datetime.timedelta(**{unit: time})
return today + delta
return datetime.datetime.strptime(date_str, "%Y%m%d").date()
-
+
+
def hyphenate_date(date_str):
"""
Convert a date in 'YYYYMMDD' format to 'YYYY-MM-DD' format"""
else:
return date_str
+
class DateRange(object):
"""Represents a time interval between two dates"""
+
def __init__(self, start=None, end=None):
"""start and end must be strings in the format accepted by date"""
if start is not None:
self.end = datetime.datetime.max.date()
if self.start > self.end:
raise ValueError('Date range: "%s", the start date must be before the end date' % self)
+
@classmethod
def day(cls, day):
"""Returns a range that only contains the given day"""
- return cls(day,day)
+ return cls(day, day)
+
def __contains__(self, date):
"""Check if the date is in the range"""
if not isinstance(date, datetime.date):
date = date_from_str(date)
return self.start <= date <= self.end
+
def __str__(self):
- return '%s - %s' % ( self.start.isoformat(), self.end.isoformat())
+ return '%s - %s' % (self.start.isoformat(), self.end.isoformat())
def platform_name():
def prepend_extension(filename, ext):
- name, real_ext = os.path.splitext(filename)
+ name, real_ext = os.path.splitext(filename)
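+    # e.g. prepend_extension('video.mp4', 'temp') -> 'video.temp.mp4'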
return '{0}.{1}{2}'.format(name, ext, real_ext)