From d915fe0b0cc3405ea552dba87609a5f8d2e7a691 Mon Sep 17 00:00:00 2001
From: Laura Liberda
Date: Fri, 1 Jan 2021 07:05:16 +0100
Subject: [PATCH] [generic] embed searching normalization 2/n
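
Make the _extract_urls helpers of the embed extractors accept **kwargs, so
that GenericIE can pass extra context without breaking extractors that do
not need it. GenericIE now calls _extract_urls(webpage, url=url), and
PeerTubeSHIE._extract_urls returns 'peertube:%s:%s' % (host, video_id)
strings instead of raw regex matches.

Illustrative sketch only (a hypothetical example extractor, not part of this
patch; assumes "import re" and the usual InfoExtractor class context): an
extractor can read the page URL from kwargs, e.g. to resolve
protocol-relative embed URLs:

    @staticmethod
    def _extract_urls(webpage, **kwargs):
        # GenericIE passes the source page URL as url=...; extractors that
        # do not use it can simply ignore **kwargs.
        page_url = kwargs.get('url') or ''
        scheme = 'https:' if page_url.startswith('https:') else 'http:'
        return [
            scheme + embed_url if embed_url.startswith('//') else embed_url
            for embed_url in re.findall(
                r'<iframe[^>]+src=["\']((?:https?:)?//example\.com/embed/\d+)',
                webpage)]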
---
 haruhi_dl/extractor/apa.py                 | 2 +-
 haruhi_dl/extractor/channel9.py            | 2 +-
 haruhi_dl/extractor/cloudflarestream.py    | 2 +-
 haruhi_dl/extractor/dailymail.py           | 2 +-
 haruhi_dl/extractor/dailymotion.py         | 2 +-
 haruhi_dl/extractor/dbtv.py                | 2 +-
 haruhi_dl/extractor/drtuber.py             | 2 +-
 haruhi_dl/extractor/expressen.py           | 2 +-
 haruhi_dl/extractor/facebook.py            | 2 +-
 haruhi_dl/extractor/foxnews.py             | 2 +-
 haruhi_dl/extractor/generic.py             | 3 ++-
 haruhi_dl/extractor/indavideo.py           | 2 +-
 haruhi_dl/extractor/joj.py                 | 2 +-
 haruhi_dl/extractor/jwplatform.py          | 2 +-
 haruhi_dl/extractor/liveleak.py            | 2 +-
 haruhi_dl/extractor/megaphone.py           | 2 +-
 haruhi_dl/extractor/mofosex.py             | 2 +-
 haruhi_dl/extractor/nexx.py                | 4 ++--
 haruhi_dl/extractor/onnetwork.py           | 2 +-
 haruhi_dl/extractor/peertube.py            | 9 +++++----
 haruhi_dl/extractor/pornhub.py             | 2 +-
 haruhi_dl/extractor/redtube.py             | 2 +-
 haruhi_dl/extractor/rutube.py              | 2 +-
 haruhi_dl/extractor/soundcloud.py          | 2 +-
 haruhi_dl/extractor/spankwire.py           | 2 +-
 haruhi_dl/extractor/sportbox.py            | 2 +-
 haruhi_dl/extractor/springboardplatform.py | 2 +-
 haruhi_dl/extractor/theplatform.py         | 2 +-
 haruhi_dl/extractor/tnaflix.py             | 2 +-
 haruhi_dl/extractor/tube8.py               | 2 +-
 haruhi_dl/extractor/tunein.py              | 2 +-
 haruhi_dl/extractor/twentymin.py           | 2 +-
 haruhi_dl/extractor/vice.py                | 2 +-
 haruhi_dl/extractor/videa.py               | 2 +-
 haruhi_dl/extractor/videopress.py          | 2 +-
 haruhi_dl/extractor/viqeo.py               | 2 +-
 haruhi_dl/extractor/vshare.py              | 2 +-
 haruhi_dl/extractor/vzaar.py               | 2 +-
 haruhi_dl/extractor/washingtonpost.py      | 2 +-
 haruhi_dl/extractor/xfileshare.py          | 2 +-
 haruhi_dl/extractor/xhamster.py            | 2 +-
 haruhi_dl/extractor/yapfiles.py            | 2 +-
 haruhi_dl/extractor/youporn.py             | 2 +-
 haruhi_dl/extractor/youtube.py             | 2 +-
 haruhi_dl/extractor/zype.py                | 2 +-
 45 files changed, 51 insertions(+), 49 deletions(-)

diff --git a/haruhi_dl/extractor/apa.py b/haruhi_dl/extractor/apa.py
index 98ccdaa4a..aa07e07b9 100644
--- a/haruhi_dl/extractor/apa.py
+++ b/haruhi_dl/extractor/apa.py
@@ -38,7 +38,7 @@ class APAIE(InfoExtractor):
     }]

     @staticmethod
-    def _extract_urls(webpage):
+    def _extract_urls(webpage, **kwargs):
         return [
             mobj.group('url')
             for mobj in re.finditer(
diff --git a/haruhi_dl/extractor/channel9.py b/haruhi_dl/extractor/channel9.py
index 09cacf6d3..c1f11a065 100644
--- a/haruhi_dl/extractor/channel9.py
+++ b/haruhi_dl/extractor/channel9.py
@@ -82,7 +82,7 @@ class Channel9IE(InfoExtractor):
     _RSS_URL = 'http://channel9.msdn.com/%s/RSS'

     @staticmethod
-    def _extract_urls(webpage):
+    def _extract_urls(webpage, **kwargs):
         return re.findall(
             r'<iframe[^>]+src=["\'](https?://channel9\.msdn\.com/(?:[^/]+/)+)player\b',
             webpage)
diff --git a/haruhi_dl/extractor/cloudflarestream.py b/haruhi_dl/extractor/cloudflarestream.py
index 2fdcfbb3a..baf765011 100644
--- a/haruhi_dl/extractor/cloudflarestream.py
+++ b/haruhi_dl/extractor/cloudflarestream.py
@@ -41,7 +41,7 @@ class CloudflareStreamIE(InfoExtractor):
     }]

     @staticmethod
-    def _extract_urls(webpage):
+    def _extract_urls(webpage, **kwargs):
         return [
             mobj.group('url')
             for mobj in re.finditer(
diff --git a/haruhi_dl/extractor/dailymail.py b/haruhi_dl/extractor/dailymail.py
index 67b88fd56..8dd29f579 100644
--- a/haruhi_dl/extractor/dailymail.py
+++ b/haruhi_dl/extractor/dailymail.py
@@ -30,7 +30,7 @@ class DailyMailIE(InfoExtractor):
     }]

     @staticmethod
-    def _extract_urls(webpage):
+    def _extract_urls(webpage, **kwargs):
         return re.findall(
             r'<iframe\b[^>]+\bsrc=["\'](?P<url>(?:https?:)?//(?:www\.)?dailymail\.co\.uk/embed/video/\d+\.html)',
             webpage)
diff --git a/haruhi_dl/extractor/dailymotion.py b/haruhi_dl/extractor/dailymotion.py
index b8529050c..5b4b9bbde 100644
--- a/haruhi_dl/extractor/dailymotion.py
+++ b/haruhi_dl/extractor/dailymotion.py
@@ -191,7 +191,7 @@ class DailymotionIE(DailymotionBaseInfoExtractor):
       xid'''

     @staticmethod
-    def _extract_urls(webpage):
+    def _extract_urls(webpage, **kwargs):
         urls = []
         # Look for embedded Dailymotion player
         # https://developer.dailymotion.com/player#player-parameters
diff --git a/haruhi_dl/extractor/dbtv.py b/haruhi_dl/extractor/dbtv.py
index aaedf2e3d..8cc3b6a5f 100644
--- a/haruhi_dl/extractor/dbtv.py
+++ b/haruhi_dl/extractor/dbtv.py
@@ -32,7 +32,7 @@ class DBTVIE(InfoExtractor):
     }]

     @staticmethod
-    def _extract_urls(webpage):
+    def _extract_urls(webpage, **kwargs):
         return [url for _, url in re.findall(
             r'<iframe[^>]+src=(["\'])((?:https?:)?//(?:www\.)?dagbladet\.no/video/embed/(?:[0-9A-Za-z_-]{11}|[a-zA-Z0-9]{8}).*?)\1',
             webpage)]
diff --git a/haruhi_dl/extractor/drtuber.py b/haruhi_dl/extractor/drtuber.py
index 2baea585b..5822d39b6 100644
--- a/haruhi_dl/extractor/drtuber.py
+++ b/haruhi_dl/extractor/drtuber.py
@@ -36,7 +36,7 @@ class DrTuberIE(InfoExtractor):
     }]

     @staticmethod
-    def _extract_urls(webpage):
+    def _extract_urls(webpage, **kwargs):
         return re.findall(
             r'<iframe[^>]+?src=["\'](?P<url>(?:https?:)?//(?:www\.)?drtuber\.com/embed/\d+)',
             webpage)
diff --git a/haruhi_dl/extractor/expressen.py b/haruhi_dl/extractor/expressen.py
index dc8b855d2..c0ebcf483 100644
--- a/haruhi_dl/extractor/expressen.py
+++ b/haruhi_dl/extractor/expressen.py
@@ -48,7 +48,7 @@ class ExpressenIE(InfoExtractor):
     }]

     @staticmethod
-    def _extract_urls(webpage):
+    def _extract_urls(webpage, **kwargs):
         return [
             mobj.group('url') for mobj in re.finditer(
                 r'<iframe[^>]+\bsrc=(["\'])(?P<url>(?:https?:)?//(?:www\.)?(?:expressen|di)\.se/(?:tvspelare/video|videoplayer/embed)/tv/.+?)\1',
diff --git a/haruhi_dl/extractor/facebook.py b/haruhi_dl/extractor/facebook.py
index 610d66745..72781bd80 100644
--- a/haruhi_dl/extractor/facebook.py
+++ b/haruhi_dl/extractor/facebook.py
@@ -225,7 +225,7 @@ class FacebookIE(InfoExtractor):
     }]

     @staticmethod
-    def _extract_urls(webpage):
+    def _extract_urls(webpage, **kwargs):
         urls = []
         for mobj in re.finditer(
                 r'<iframe[^>]+?src=(["\'])(?P<url>https?://www\.facebook\.com/(?:video/embed|plugins/video\.php).+?)\1',
diff --git a/haruhi_dl/extractor/foxnews.py b/haruhi_dl/extractor/foxnews.py
index 63613cb85..e799ba399 100644
--- a/haruhi_dl/extractor/foxnews.py
+++ b/haruhi_dl/extractor/foxnews.py
@@ -59,7 +59,7 @@ class FoxNewsIE(AMPIE):
     ]

     @staticmethod
-    def _extract_urls(webpage):
+    def _extract_urls(webpage, **kwargs):
         return [
             mobj.group('url')
             for mobj in re.finditer(
diff --git a/haruhi_dl/extractor/generic.py b/haruhi_dl/extractor/generic.py
index 26ec68fe7..66f80a8ee 100644
--- a/haruhi_dl/extractor/generic.py
+++ b/haruhi_dl/extractor/generic.py
@@ -2563,7 +2563,8 @@ class GenericIE(InfoExtractor):
                 OnNetworkLoaderIE,
         ):
             try:
-                embie_urls = embie._extract_urls(webpage)
+                embie_urls = embie._extract_urls(webpage,
+                                                 url=url)
                 if embie_urls:
                     return self.playlist_from_matches(embie_urls, video_id, video_title, ie=embie.ie_key())
             except Exception as exc:
diff --git a/haruhi_dl/extractor/indavideo.py b/haruhi_dl/extractor/indavideo.py
index 4c16243ec..073044464 100644
--- a/haruhi_dl/extractor/indavideo.py
+++ b/haruhi_dl/extractor/indavideo.py
@@ -49,7 +49,7 @@ class IndavideoEmbedIE(InfoExtractor):
     # http://palyazat.indavideo.hu/video/Embertelen_dal_Dodgem_egyuttes

     @staticmethod
-    def _extract_urls(webpage):
+    def _extract_urls(webpage, **kwargs):
         return re.findall(
             r'<iframe[^>]+\bsrc=["\'](?P<url>(?:https?:)?//embed\.indavideo\.hu/player/video/[\da-f]+)',
             webpage)
diff --git a/haruhi_dl/extractor/joj.py b/haruhi_dl/extractor/joj.py
index 62b28e980..8b2f15ba9 100644
--- a/haruhi_dl/extractor/joj.py
+++ b/haruhi_dl/extractor/joj.py
@@ -41,7 +41,7 @@ class JojIE(InfoExtractor):
     }]

     @staticmethod
-    def _extract_urls(webpage):
+    def _extract_urls(webpage, **kwargs):
         return [
             mobj.group('url')
             for mobj in re.finditer(
diff --git a/haruhi_dl/extractor/jwplatform.py b/haruhi_dl/extractor/jwplatform.py
index c34b5f5e6..b1c6adbd7 100644
--- a/haruhi_dl/extractor/jwplatform.py
+++ b/haruhi_dl/extractor/jwplatform.py
@@ -31,7 +31,7 @@ class JWPlatformIE(InfoExtractor):
         return urls[0] if urls else None

     @staticmethod
-    def _extract_urls(webpage):
+    def _extract_urls(webpage, **kwargs):
         return re.findall(
             r'<(?:script|iframe)[^>]+?src=["\']((?:https?:)?//(?:content\.jwplatform|cdn\.jwplayer)\.com/players/[a-zA-Z0-9]{8})',
             webpage)
diff --git a/haruhi_dl/extractor/liveleak.py b/haruhi_dl/extractor/liveleak.py
index 48f8a8c2e..4f3c6609d 100644
--- a/haruhi_dl/extractor/liveleak.py
+++ b/haruhi_dl/extractor/liveleak.py
@@ -89,7 +89,7 @@ class LiveLeakIE(InfoExtractor):
     }]

     @staticmethod
-    def _extract_urls(webpage):
+    def _extract_urls(webpage, **kwargs):
         return re.findall(
             r'<iframe[^>]+src="(https?://(?:\w+\.)?liveleak\.com/ll_embed\?[^"]*[ift]=[\w_]+[^"]+)"',
             webpage)
diff --git a/haruhi_dl/extractor/megaphone.py b/haruhi_dl/extractor/megaphone.py
index 5bafa6cf4..3f7a10a68 100644
--- a/haruhi_dl/extractor/megaphone.py
+++ b/haruhi_dl/extractor/megaphone.py
@@ -50,6 +50,6 @@ class MegaphoneIE(InfoExtractor):
         }

     @classmethod
-    def _extract_urls(cls, webpage):
+    def _extract_urls(cls, webpage, **kwargs):
         return [m[0] for m in re.findall(
             r'<iframe[^>]*?\ssrc=["\'](%s)' % cls._VALID_URL, webpage)]
diff --git a/haruhi_dl/extractor/mofosex.py b/haruhi_dl/extractor/mofosex.py
index 5234cac02..b1ee38eaf 100644
--- a/haruhi_dl/extractor/mofosex.py
+++ b/haruhi_dl/extractor/mofosex.py
@@ -67,7 +67,7 @@ class MofosexEmbedIE(InfoExtractor):
     }]

     @staticmethod
-    def _extract_urls(webpage):
+    def _extract_urls(webpage, **kwargs):
         return re.findall(
             r'<iframe[^>]+\bsrc=["\']((?:https?:)?//(?:www\.)?mofosex\.com/embed/?\?.*?\bvideoid=\d+)',
             webpage)
diff --git a/haruhi_dl/extractor/nexx.py b/haruhi_dl/extractor/nexx.py
index 586c1b7eb..0ddc62fbd 100644
--- a/haruhi_dl/extractor/nexx.py
+++ b/haruhi_dl/extractor/nexx.py
@@ -113,7 +113,7 @@ class NexxIE(InfoExtractor):
         return mobj.group('id') if mobj else None

     @staticmethod
-    def _extract_urls(webpage):
+    def _extract_urls(webpage, **kwargs):
         # Reference:
         # 1. https://nx-s.akamaized.net/files/201510/44.pdf

@@ -436,7 +436,7 @@ class NexxEmbedIE(InfoExtractor):
     }]

     @staticmethod
-    def _extract_urls(webpage):
+    def _extract_urls(webpage, **kwargs):
         # Reference:
         # 1. https://nx-s.akamaized.net/files/201510/44.pdf

diff --git a/haruhi_dl/extractor/onnetwork.py b/haruhi_dl/extractor/onnetwork.py
index 169cec0d9..308f60f0d 100644
--- a/haruhi_dl/extractor/onnetwork.py
+++ b/haruhi_dl/extractor/onnetwork.py
@@ -24,7 +24,7 @@ class OnNetworkLoaderIE(InfoExtractor):
     _VALID_URL = r'''https?://video\.onnetwork\.tv/embed\.php\?(?:mid=(?P<mid>[^&]+))?(?:&?sid=(?P<sid>[^&\s]+))?(?:&?cId=onn-cid-(?P<cid>\d+))?(?:.+)?'''

     @staticmethod
-    def _extract_urls(webpage):
+    def _extract_urls(webpage, **kwargs):
         matches = re.finditer(
             r'''<script[^>]*src=["'](%s.*?)["']''' % OnNetworkLoaderIE._VALID_URL,
             webpage)
diff --git a/haruhi_dl/extractor/peertube.py b/haruhi_dl/extractor/peertube.py
index a3149ba5e..e2912b44e 100644
--- a/haruhi_dl/extractor/peertube.py
+++ b/haruhi_dl/extractor/peertube.py
@@ -72,11 +72,12 @@ class PeerTubeSHIE(SelfhostedInfoExtractor):
     }]

     @staticmethod
-    def _extract_urls(webpage):
-        entries = re.findall(
-            r'''(?x)<iframe[^>]+\bsrc=["\'](?P<url>(?:https?:)?//[^/]+/videos/embed/%s)'''
+    def _extract_urls(webpage, **kwargs):
+        entries = re.finditer(
+            r'''(?x)<iframe[^>]+\bsrc=["\'](?:https?:)?//(?P<host>[^/]+)/videos/embed/(?P<video_id>%s)'''
             % (PeerTubeSHIE._UUID_RE), webpage)
-        return entries
+        return ['peertube:%s:%s' % (mobj.group('host'), mobj.group('video_id'))
+                for mobj in entries]

     def _call_api(self, host, video_id, path, note=None, errnote=None, fatal=True):
         return self._download_json(
diff --git a/haruhi_dl/extractor/pornhub.py b/haruhi_dl/extractor/pornhub.py
index a7301b671..13d5a21df 100644
--- a/haruhi_dl/extractor/pornhub.py
+++ b/haruhi_dl/extractor/pornhub.py
@@ -158,7 +158,7 @@ class PornHubIE(PornHubBaseIE):
     }]

     @staticmethod
-    def _extract_urls(webpage):
+    def _extract_urls(webpage, **kwargs):
         return re.findall(
             r'<iframe[^>]+?src=["\'](?P<url>(?:https?:)?//(?:www\.)?pornhub\.(?:com|net)/embed/[\da-z]+)',
             webpage)
diff --git a/haruhi_dl/extractor/redtube.py b/haruhi_dl/extractor/redtube.py
index a1ca791ca..942fa3eba 100644
--- a/haruhi_dl/extractor/redtube.py
+++ b/haruhi_dl/extractor/redtube.py
@@ -37,7 +37,7 @@ class RedTubeIE(InfoExtractor):
     }]

     @staticmethod
-    def _extract_urls(webpage):
+    def _extract_urls(webpage, **kwargs):
         return re.findall(
             r'<iframe[^>]+?src=["\'](?P<url>(?:https?:)?//embed\.redtube\.com/\?.*?\bid=\d+)',
             webpage)
diff --git a/haruhi_dl/extractor/rutube.py b/haruhi_dl/extractor/rutube.py
index 8f54d5675..f69026b7d 100644
--- a/haruhi_dl/extractor/rutube.py
+++ b/haruhi_dl/extractor/rutube.py
@@ -133,7 +133,7 @@ class RutubeIE(RutubeBaseIE):
         return False if RutubePlaylistIE.suitable(url) else super(RutubeIE, cls).suitable(url)

     @staticmethod
-    def _extract_urls(webpage):
+    def _extract_urls(webpage, **kwargs):
         return [mobj.group('url') for mobj in re.finditer(
             r'<iframe[^>]+?src=(["\'])(?P<url>(?:https?:)?//rutube\.ru/embed/[\da-z]{32}.*?)\1',
             webpage)]
diff --git a/haruhi_dl/extractor/soundcloud.py b/haruhi_dl/extractor/soundcloud.py
index 0b787444c..896fa77a4 100644
--- a/haruhi_dl/extractor/soundcloud.py
+++ b/haruhi_dl/extractor/soundcloud.py
@@ -40,7 +40,7 @@ class SoundcloudEmbedIE(InfoExtractor):
     }

     @staticmethod
-    def _extract_urls(webpage):
+    def _extract_urls(webpage, **kwargs):
         return [m.group('url') for m in re.finditer(
             r'<iframe[^>]+src=(["\'])(?P<url>(?:https?://)?(?:w\.)?soundcloud\.com/player.+?)\1',
             webpage)]
diff --git a/haruhi_dl/extractor/spankwire.py b/haruhi_dl/extractor/spankwire.py
index 35ab9ec37..b9759c394 100644
--- a/haruhi_dl/extractor/spankwire.py
+++ b/haruhi_dl/extractor/spankwire.py
@@ -68,7 +68,7 @@ class SpankwireIE(InfoExtractor):
     }]

     @staticmethod
-    def _extract_urls(webpage):
+    def _extract_urls(webpage, **kwargs):
         return re.findall(
             r'<iframe[^>]+\bsrc=["\']((?:https?:)?//(?:www\.)?spankwire\.com/EmbedPlayer\.aspx/?\?.*?\bArticleId=\d+)',
             webpage)
diff --git a/haruhi_dl/extractor/sportbox.py b/haruhi_dl/extractor/sportbox.py
index b9017fd2a..dc27a4b6b 100644
--- a/haruhi_dl/extractor/sportbox.py
+++ b/haruhi_dl/extractor/sportbox.py
@@ -46,7 +46,7 @@ class SportBoxIE(InfoExtractor):
     }]

     @staticmethod
-    def _extract_urls(webpage):
+    def _extract_urls(webpage, **kwargs):
         return re.findall(
             r'<iframe[^>]+src="(https?://(?:news\.sportbox|matchtv)\.ru/vdl/player[^"]+)"',
             webpage)
diff --git a/haruhi_dl/extractor/springboardplatform.py b/haruhi_dl/extractor/springboardplatform.py
index 07d99b579..c0b8bd8b4 100644
--- a/haruhi_dl/extractor/springboardplatform.py
+++ b/haruhi_dl/extractor/springboardplatform.py
@@ -49,7 +49,7 @@ class SpringboardPlatformIE(InfoExtractor):
     }]

     @staticmethod
-    def _extract_urls(webpage):
+    def _extract_urls(webpage, **kwargs):
         return [
             mobj.group('url')
             for mobj in re.finditer(
diff --git a/haruhi_dl/extractor/theplatform.py b/haruhi_dl/extractor/theplatform.py
index 472238010..5b14bbf82 100644
--- a/haruhi_dl/extractor/theplatform.py
+++ b/haruhi_dl/extractor/theplatform.py
@@ -198,7 +198,7 @@ class ThePlatformIE(ThePlatformBaseIE, AdobePassIE):
     }]

     @classmethod
-    def _extract_urls(cls, webpage):
+    def _extract_urls(cls, webpage, **kwargs):
         m = re.search(
             r'''(?x)
                 <meta\s+
diff --git a/haruhi_dl/extractor/tnaflix.py b/haruhi_dl/extractor/tnaflix.py
--- a/haruhi_dl/extractor/tnaflix.py
+++ b/haruhi_dl/extractor/tnaflix.py
     @staticmethod
-    def _extract_urls(webpage):
+    def _extract_urls(webpage, **kwargs):
         return [url for _, url in re.findall(
             r'<iframe[^>]+?src=(["\'])(?P<url>(?:https?:)?//player\.(?:tna|emp)flix\.com/video/\d+)\1',
             webpage)]
diff --git a/haruhi_dl/extractor/tube8.py b/haruhi_dl/extractor/tube8.py
index db93b0182..8de4e2b79 100644
--- a/haruhi_dl/extractor/tube8.py
+++ b/haruhi_dl/extractor/tube8.py
@@ -32,7 +32,7 @@ class Tube8IE(KeezMoviesIE):
     }]

     @staticmethod
-    def _extract_urls(webpage):
+    def _extract_urls(webpage, **kwargs):
         return re.findall(
             r'<iframe[^>]+\bsrc=["\']((?:https?:)?//(?:www\.)?tube8\.com/embed/(?:[^/]+/)+\d+)',
             webpage)
diff --git a/haruhi_dl/extractor/tunein.py b/haruhi_dl/extractor/tunein.py
index c7a5f5a63..9e3ea8c9d 100644
--- a/haruhi_dl/extractor/tunein.py
+++ b/haruhi_dl/extractor/tunein.py
@@ -12,7 +12,7 @@ class TuneInBaseIE(InfoExtractor):
     _API_BASE_URL = 'http://tunein.com/tuner/tune/'

     @staticmethod
-    def _extract_urls(webpage):
+    def _extract_urls(webpage, **kwargs):
         return re.findall(
             r'<iframe[^>]+src=["\'](?P<url>(?:https?://)?tunein\.com/embed/player/[pst]\d+)',
             webpage)
diff --git a/haruhi_dl/extractor/twentymin.py b/haruhi_dl/extractor/twentymin.py
index a42977f39..31b8a97e5 100644
--- a/haruhi_dl/extractor/twentymin.py
+++ b/haruhi_dl/extractor/twentymin.py
@@ -48,7 +48,7 @@ class TwentyMinutenIE(InfoExtractor):
     }]

     @staticmethod
-    def _extract_urls(webpage):
+    def _extract_urls(webpage, **kwargs):
         return [m.group('url') for m in re.finditer(
             r'<iframe[^>]+src=(["\'])(?P<url>(?:(?:https?:)?//)?(?:www\.)?20min\.ch/videoplayer/videoplayer.html\?.*?\bvideoId@\d+.*?)\1',
             webpage)]
diff --git a/haruhi_dl/extractor/vice.py b/haruhi_dl/extractor/vice.py
index e37499512..eef99f2ae 100644
--- a/haruhi_dl/extractor/vice.py
+++ b/haruhi_dl/extractor/vice.py
@@ -107,7 +107,7 @@ class ViceIE(ViceBaseIE, AdobePassIE):
     }]

     @staticmethod
-    def _extract_urls(webpage):
+    def _extract_urls(webpage, **kwargs):
         return re.findall(
             r'<iframe\b[^>]+\bsrc=["\']((?:https?:)?//video\.vice\.com/[^/]+/embed/[\da-f]{24})',
             webpage)
diff --git a/haruhi_dl/extractor/videa.py b/haruhi_dl/extractor/videa.py
index d0e34c819..5830e7fd7 100644
--- a/haruhi_dl/extractor/videa.py
+++ b/haruhi_dl/extractor/videa.py
@@ -55,7 +55,7 @@ class VideaIE(InfoExtractor):
     }]

     @staticmethod
-    def _extract_urls(webpage):
+    def _extract_urls(webpage, **kwargs):
         return [url for _, url in re.findall(
             r'<iframe[^>]+src=(["\'])(?P<url>(?:https?:)?//videa\.hu/player\?.*?\bv=.+?)\1',
             webpage)]
diff --git a/haruhi_dl/extractor/videopress.py b/haruhi_dl/extractor/videopress.py
index e5f964d39..8938050a5 100644
--- a/haruhi_dl/extractor/videopress.py
+++ b/haruhi_dl/extractor/videopress.py
@@ -39,7 +39,7 @@ class VideoPressIE(InfoExtractor):
     }]

     @staticmethod
-    def _extract_urls(webpage):
+    def _extract_urls(webpage, **kwargs):
         return re.findall(
             r'<iframe[^>]+src=["\']((?:https?://)?videopress\.com/embed/[\da-zA-Z]+)',
             webpage)
diff --git a/haruhi_dl/extractor/viqeo.py b/haruhi_dl/extractor/viqeo.py
index be7dfa814..b53eb66ac 100644
--- a/haruhi_dl/extractor/viqeo.py
+++ b/haruhi_dl/extractor/viqeo.py
@@ -39,7 +39,7 @@ class ViqeoIE(InfoExtractor):
     }]

     @staticmethod
-    def _extract_urls(webpage):
+    def _extract_urls(webpage, **kwargs):
         return [
             mobj.group('url')
             for mobj in re.finditer(
diff --git a/haruhi_dl/extractor/vshare.py b/haruhi_dl/extractor/vshare.py
index c631ac1fa..35ed84bcc 100644
--- a/haruhi_dl/extractor/vshare.py
+++ b/haruhi_dl/extractor/vshare.py
@@ -27,7 +27,7 @@ class VShareIE(InfoExtractor):
     }]

     @staticmethod
-    def _extract_urls(webpage):
+    def _extract_urls(webpage, **kwargs):
         return re.findall(
             r'<iframe[^>]+?src=["\'](?P<url>(?:https?:)?//(?:www\.)?vshare\.io/v/[^/?#&]+)',
             webpage)
diff --git a/haruhi_dl/extractor/vzaar.py b/haruhi_dl/extractor/vzaar.py
index b7d02fca3..9ef961f8d 100644
--- a/haruhi_dl/extractor/vzaar.py
+++ b/haruhi_dl/extractor/vzaar.py
@@ -51,7 +51,7 @@ class VzaarIE(InfoExtractor):
     }]

     @staticmethod
-    def _extract_urls(webpage):
+    def _extract_urls(webpage, **kwargs):
         return re.findall(
             r'<iframe[^>]+src=["\']((?:https?:)?//(?:view\.vzaar\.com)/[0-9]+)',
             webpage)
diff --git a/haruhi_dl/extractor/washingtonpost.py b/haruhi_dl/extractor/washingtonpost.py
index 625d0a1cc..329907465 100644
--- a/haruhi_dl/extractor/washingtonpost.py
+++ b/haruhi_dl/extractor/washingtonpost.py
@@ -29,7 +29,7 @@ class WashingtonPostIE(InfoExtractor):
     }

     @classmethod
-    def _extract_urls(cls, webpage):
+    def _extract_urls(cls, webpage, **kwargs):
         return re.findall(
             r'<iframe[^>]+\bsrc=["\'](%s)' % cls._EMBED_URL, webpage)

diff --git a/haruhi_dl/extractor/xfileshare.py b/haruhi_dl/extractor/xfileshare.py
index 48ef07ed1..20f7013f3 100644
--- a/haruhi_dl/extractor/xfileshare.py
+++ b/haruhi_dl/extractor/xfileshare.py
@@ -81,7 +81,7 @@ class XFileShareIE(InfoExtractor):
     }]

     @staticmethod
-    def _extract_urls(webpage):
+    def _extract_urls(webpage, **kwargs):
         return [
             mobj.group('url')
             for mobj in re.finditer(
diff --git a/haruhi_dl/extractor/xhamster.py b/haruhi_dl/extractor/xhamster.py
index 76aeaf9a4..19ec98de2 100644
--- a/haruhi_dl/extractor/xhamster.py
+++ b/haruhi_dl/extractor/xhamster.py
@@ -325,7 +325,7 @@ class XHamsterEmbedIE(InfoExtractor):
     }

     @staticmethod
-    def _extract_urls(webpage):
+    def _extract_urls(webpage, **kwargs):
         return [url for _, url in re.findall(
             r'<iframe[^>]+?src=(["\'])(?P<url>(?:https?:)?//(?:www\.)?xhamster\.com/xembed\.php\?video=\d+)\1',
             webpage)]
diff --git a/haruhi_dl/extractor/yapfiles.py b/haruhi_dl/extractor/yapfiles.py
index cfb368de9..00d8c2552 100644
--- a/haruhi_dl/extractor/yapfiles.py
+++ b/haruhi_dl/extractor/yapfiles.py
@@ -34,7 +34,7 @@ class YapFilesIE(InfoExtractor):
     }]

     @staticmethod
-    def _extract_urls(webpage):
+    def _extract_urls(webpage, **kwargs):
         return [unescapeHTML(mobj.group('url')) for mobj in re.finditer(
             r'<iframe\b[^>]+\bsrc=(["\'])(?P<url>(?:https?:)?%s.*?)\1' % YapFilesIE._YAPFILES_URL,
             webpage)]
diff --git a/haruhi_dl/extractor/youporn.py b/haruhi_dl/extractor/youporn.py
index e7fca22de..c178e2f39 100644
--- a/haruhi_dl/extractor/youporn.py
+++ b/haruhi_dl/extractor/youporn.py
@@ -65,7 +65,7 @@ class YouPornIE(InfoExtractor):
     }]

     @staticmethod
-    def _extract_urls(webpage):
+    def _extract_urls(webpage, **kwargs):
         return re.findall(
             r'<iframe[^>]+\bsrc=["\']((?:https?:)?//(?:www\.)?youporn\.com/embed/\d+)',
             webpage)
diff --git a/haruhi_dl/extractor/youtube.py b/haruhi_dl/extractor/youtube.py
index e61b5ae08..728105789 100644
--- a/haruhi_dl/extractor/youtube.py
+++ b/haruhi_dl/extractor/youtube.py
@@ -1252,7 +1252,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
             'Unable to mark watched', fatal=False)

     @staticmethod
-    def _extract_urls(webpage):
+    def _extract_urls(webpage, **kwargs):
         # Embedded YouTube player
         entries = [
             unescapeHTML(mobj.group('url'))
diff --git a/haruhi_dl/extractor/zype.py b/haruhi_dl/extractor/zype.py
index 2e2e97a0c..f336ebdb9 100644
--- a/haruhi_dl/extractor/zype.py
+++ b/haruhi_dl/extractor/zype.py
@@ -33,7 +33,7 @@ class ZypeIE(InfoExtractor):
     }

     @staticmethod
-    def _extract_urls(webpage):
+    def _extract_urls(webpage, **kwargs):
         return [
             mobj.group('url')
             for mobj in re.finditer(