• 企业400电话
  • 微网小程序
  • AI电话机器人
  • 电商代运营
  • 全 部 栏 目

    企业400电话 网络优化推广 AI电话机器人 呼叫中心 网站建设 商标•知产 微网小程序 电商运营 彩铃•短信 增值拓展业务
    python 爬取影视网站下载链接

    项目地址:

    https://github.com/GriffinLewis2001/Python_movie_links_scraper

    运行效果

    导入模块

    import requests,re
    from requests.cookies import RequestsCookieJar
    from fake_useragent import UserAgent
    import os,pickle,threading,time
    import concurrent.futures
    from goto import with_goto

    爬虫主代码

    def get_content_url_name(url):
        """Fetch a listing page and return (href, title) tuples for thumbnail links.

        NOTE(review): this helper is not called by main() in this script;
        kept for backward compatibility.
        """
        send_headers = {
            "User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36",
            "Connection": "keep-alive",
            "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8",
            "Accept-Language": "zh-CN,zh;q=0.8",
        }
        cookie_jar = RequestsCookieJar()
        cookie_jar.set("mttp", "9740fe449238", domain="www.yikedy.co")
        # Bug fix: the second positional argument of requests.get() is
        # `params`, not `headers` — the headers were being sent as a query
        # string instead of HTTP headers. Pass them by keyword.
        response = requests.get(url, headers=send_headers, cookies=cookie_jar)
        response.encoding = 'utf-8'
        content = response.text
        # Regex mirrors the site's (duplicated) rel attributes verbatim.
        reg = re.compile(r'a href="(.*?)" rel="external nofollow"  rel="external nofollow"  rel="external nofollow"  rel="external nofollow"  rel="external nofollow"  rel="external nofollow"  class="thumbnail-img" title="(.*?)"')
        url_name_list = reg.findall(content)
        return url_name_list
    
    def get_content(url):
        """Fetch *url* with browser-like headers and the site cookie; return page text (utf-8)."""
        send_headers = {
            "User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36",
            "Connection": "keep-alive",
            "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8",
            "Accept-Language": "zh-CN,zh;q=0.8",
        }
        cookie_jar = RequestsCookieJar()
        cookie_jar.set("mttp", "9740fe449238", domain="www.yikedy.co")
        # Bug fix: headers must be passed by keyword — the second positional
        # argument of requests.get() is `params`, so the headers were being
        # appended to the query string instead of sent as HTTP headers.
        response = requests.get(url, headers=send_headers, cookies=cookie_jar)
        response.encoding = 'utf-8'
        return response.text
    
    
    
    def search_durl(url):
        """Resolve a detail-page URL into its list of download links.

        Extracts the hex-obfuscated `decriptParam` token embedded in the
        detail page, requests the matching downloadList page, and scrapes
        every download href from it. Raises IndexError when the token is
        not present on the page.
        """
        page = get_content(url)
        token_pattern = re.compile(r"{'\\x64\\x65\\x63\\x72\\x69\\x70\\x74\\x50\\x61\\x72\\x61\\x6d':'(.*?)'}")
        token = token_pattern.findall(page)[0]
        # Strip the ".html"-style suffix (last 5 chars) before appending the path.
        listing_url = url[:-5] + r'/downloadList?decriptParam=' + token
        listing_page = get_content(listing_url)
        link_pattern = re.compile(r'title=".*?" href="(.*?)" rel="external nofollow"  rel="external nofollow"  rel="external nofollow"  rel="external nofollow"  rel="external nofollow"  rel="external nofollow" ')
        return link_pattern.findall(listing_page)
    def get_page(url):
        """Fetch a search-results page and return (href, title, text) tuples for result links."""
        send_headers = {
            "User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36",
            "Connection": "keep-alive",
            "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8",
            "Accept-Language": "zh-CN,zh;q=0.8",
        }
        cookie_jar = RequestsCookieJar()
        cookie_jar.set("mttp", "9740fe449238", domain="www.yikedy.co")
        # Bug fix: pass headers by keyword; positionally they land in
        # requests.get()'s `params` argument and never reach the server
        # as HTTP headers.
        response = requests.get(url, headers=send_headers, cookies=cookie_jar)
        response.encoding = 'utf-8'
        content = response.text
        reg = re.compile(r'a target="_blank" class="title" href="(.*?)" rel="external nofollow"  rel="external nofollow"  rel="external nofollow"  rel="external nofollow"  rel="external nofollow"  rel="external nofollow"  title="(.*?)">(.*?)\/a>')
        url_name_list = reg.findall(content)
        return url_name_list
    def main():
        """Prompt for a show name, search the site, and print download links.

        Rewritten without the third-party `goto` hack: the goto/label jumps
        are replaced by early returns. Prompts and printed output are
        unchanged from the original.
        """
        print("=========================================================")
        name = input("请输入剧名(输入quit退出):")
        if name == "quit":
            exit()
        url = "http://www.yikedy.co/search?query=" + name
        dlist = get_page(url)
        print("\n")
        # Keep only results whose title contains the query string.
        matches = [item for item in dlist if name in item[1]] if dlist else []
        if not matches:
            print("没找到或不想看\n")
            return
        for num, item in enumerate(matches):
            print(f"{num} {item[1]}")
        # May raise ValueError on non-numeric input, same as the original.
        dest = int(input("\n\n请输入剧的编号(输100跳过此次搜寻):"))
        if dest == 100:
            # Original jumped to the "not found / skip" message here too.
            print("没找到或不想看\n")
            return
        print("\n以下为下载链接:\n")
        if 0 <= dest < len(matches):
            for durl in search_durl(matches[dest][0]):
                print(f"{durl}\n")
            print("\n")

    完整代码

    import requests,re
    from requests.cookies import RequestsCookieJar
    from fake_useragent import UserAgent
    import os,pickle,threading,time
    import concurrent.futures
    from goto import with_goto
    
    def get_content_url_name(url):
        """Fetch a listing page and return (href, title) tuples for thumbnail links.

        NOTE(review): this helper is not called by main() in this script;
        kept for backward compatibility.
        """
        send_headers = {
            "User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36",
            "Connection": "keep-alive",
            "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8",
            "Accept-Language": "zh-CN,zh;q=0.8",
        }
        cookie_jar = RequestsCookieJar()
        cookie_jar.set("mttp", "9740fe449238", domain="www.yikedy.co")
        # Bug fix: the second positional argument of requests.get() is
        # `params`, not `headers` — the headers were being sent as a query
        # string instead of HTTP headers. Pass them by keyword.
        response = requests.get(url, headers=send_headers, cookies=cookie_jar)
        response.encoding = 'utf-8'
        content = response.text
        # Regex mirrors the site's (duplicated) rel attributes verbatim.
        reg = re.compile(r'a href="(.*?)" rel="external nofollow"  rel="external nofollow"  rel="external nofollow"  rel="external nofollow"  rel="external nofollow"  rel="external nofollow"  class="thumbnail-img" title="(.*?)"')
        url_name_list = reg.findall(content)
        return url_name_list
    
    def get_content(url):
        """Fetch *url* with browser-like headers and the site cookie; return page text (utf-8)."""
        send_headers = {
            "User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36",
            "Connection": "keep-alive",
            "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8",
            "Accept-Language": "zh-CN,zh;q=0.8",
        }
        cookie_jar = RequestsCookieJar()
        cookie_jar.set("mttp", "9740fe449238", domain="www.yikedy.co")
        # Bug fix: headers must be passed by keyword — the second positional
        # argument of requests.get() is `params`, so the headers were being
        # appended to the query string instead of sent as HTTP headers.
        response = requests.get(url, headers=send_headers, cookies=cookie_jar)
        response.encoding = 'utf-8'
        return response.text
    
    
    
    def search_durl(url):
        """Resolve a detail-page URL into its list of download links.

        Extracts the hex-obfuscated `decriptParam` token embedded in the
        detail page, requests the matching downloadList page, and scrapes
        every download href from it. Raises IndexError when the token is
        not present on the page.
        """
        page = get_content(url)
        token_pattern = re.compile(r"{'\\x64\\x65\\x63\\x72\\x69\\x70\\x74\\x50\\x61\\x72\\x61\\x6d':'(.*?)'}")
        token = token_pattern.findall(page)[0]
        # Strip the ".html"-style suffix (last 5 chars) before appending the path.
        listing_url = url[:-5] + r'/downloadList?decriptParam=' + token
        listing_page = get_content(listing_url)
        link_pattern = re.compile(r'title=".*?" href="(.*?)" rel="external nofollow"  rel="external nofollow"  rel="external nofollow"  rel="external nofollow"  rel="external nofollow"  rel="external nofollow" ')
        return link_pattern.findall(listing_page)
    def get_page(url):
        """Fetch a search-results page and return (href, title, text) tuples for result links."""
        send_headers = {
            "User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36",
            "Connection": "keep-alive",
            "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8",
            "Accept-Language": "zh-CN,zh;q=0.8",
        }
        cookie_jar = RequestsCookieJar()
        cookie_jar.set("mttp", "9740fe449238", domain="www.yikedy.co")
        # Bug fix: pass headers by keyword; positionally they land in
        # requests.get()'s `params` argument and never reach the server
        # as HTTP headers.
        response = requests.get(url, headers=send_headers, cookies=cookie_jar)
        response.encoding = 'utf-8'
        content = response.text
        reg = re.compile(r'a target="_blank" class="title" href="(.*?)" rel="external nofollow"  rel="external nofollow"  rel="external nofollow"  rel="external nofollow"  rel="external nofollow"  rel="external nofollow"  title="(.*?)">(.*?)\/a>')
        url_name_list = reg.findall(content)
        return url_name_list
    def main():
        """Prompt for a show name, search the site, and print download links.

        Rewritten without the third-party `goto` hack: the goto/label jumps
        are replaced by early returns. Prompts and printed output are
        unchanged from the original.
        """
        print("=========================================================")
        name = input("请输入剧名(输入quit退出):")
        if name == "quit":
            exit()
        url = "http://www.yikedy.co/search?query=" + name
        dlist = get_page(url)
        print("\n")
        # Keep only results whose title contains the query string.
        matches = [item for item in dlist if name in item[1]] if dlist else []
        if not matches:
            print("没找到或不想看\n")
            return
        for num, item in enumerate(matches):
            print(f"{num} {item[1]}")
        # May raise ValueError on non-numeric input, same as the original.
        dest = int(input("\n\n请输入剧的编号(输100跳过此次搜寻):"))
        if dest == 100:
            # Original jumped to the "not found / skip" message here too.
            print("没找到或不想看\n")
            return
        print("\n以下为下载链接:\n")
        if 0 <= dest < len(matches):
            for durl in search_durl(matches[dest][0]):
                print(f"{durl}\n")
            print("\n")
    
    # Entry point: guard so importing this module doesn't start the
    # interactive loop. Running as a script behaves exactly as before.
    if __name__ == "__main__":
        print("本软件由CLY.所有\n\n")
        while True:
            main()

    以上就是python 爬取影视网站下载链接的详细内容,更多关于python 爬取下载链接的资料请关注脚本之家其它相关文章!

    您可能感兴趣的文章:
    • python 爬取吉首大学网站成绩单
    • python趣味挑战之爬取天气与微博热搜并自动发给微信好友
    • Python爬虫之爬取我爱我家二手房数据
    • python 爬取京东指定商品评论并进行情感分析
    • python结合多线程爬取英雄联盟皮肤(原理分析)
    • python爬取豆瓣电影TOP250数据
    • python爬取链家二手房的数据
    • 教你怎么用python爬取爱奇艺热门电影
    • Python爬虫之爬取最新更新的小说网站
    • Python爬虫实战之爬取携程评论
    上一篇:Python爬虫之爬取我爱我家二手房数据
    下一篇:pytorch中DataLoader()过程中遇到的一些问题
  • 相关文章
  • 

    © 2016-2020 巨人网络通讯 版权所有

    《增值电信业务经营许可证》 苏ICP备15040257号-8

    python 爬取影视网站下载链接 python,爬取,影视,网站,下载,