Python通过代理多线程抓取图片

    Python作为一门功能强大的脚本语言,经常被用来写爬虫程序,下面是Python通过代理多线程抓取图片代码
    Python爬虫多线程抓取代理服务器参考:http://www.linuxeye.com/program/1763.html

    说明:
    1. 多线程方式抓取代理服务器,并多线程验证代理服务器
    ps 代理服务器是从http://www.cnproxy.com/ (测试只选择了8个页面)抓取

    2. 抓取一个网站的图片地址,多线程随机取一个代理服务器下载图片
    ps 图片网站地址:http://www.ivsky.com(测试只选择了有限的页面数)

    #!/usr/bin/env python
    #BLOG:linuxeye.com
    #coding:utf-8
    
    import urllib2
    import re
    import threading
    import time
    import random
    
    # Shared result lists, appended to concurrently by the worker threads.
    rawProxyList = []
    checkedProxyList = []
    imgurl_list = []

    # cnproxy.com hides port numbers behind inline JavaScript; each letter
    # token in the script decodes to one decimal digit via this table.
    portdicts = {'v': "3", 'm': "4", 'a': "2", 'l': "9", 'q': "0",
                 'b': "5", 'i': "7", 'w': "6", 'r': "8", 'c': "1"}

    # Proxy listing pages 1..8 of the scrape target.
    targets = [r"http://www.cnproxy.com/proxy%d.html" % page for page in xrange(1, 9)]

    # One table row: (ip, obfuscated port expression, proxy type, address).
    p = re.compile(r'''<tr><td>(.+?)<SCRIPT type=text/javascript>document.write\(":"\+(.+?)\)</SCRIPT></td><td>(.+?)</td><td>.+?</td><td>(.+?)</td></tr>''')
    
    #获取代理的类
    class ProxyGet(threading.Thread):
        def __init__(self,target):
            threading.Thread.__init__(self)
            self.target = target
    
        def getProxy(self):
            print "代理服务器目标网站: " + self.target
            req = urllib2.urlopen(self.target)
            result = req.read()
            #print chardet.detect(result)
            matchs = p.findall(result)
            for row in matchs:
                ip=row[0]
                port =row[1]
                port = map(lambda x:portdicts[x],port.split('+'))
                port = ''.join(port)
                agent = row[2]
                addr = row[3].decode("cp936").encode("utf-8")
                proxy = [ip,port,addr]
                #print proxy
                rawProxyList.append(proxy)
    
        def run(self):
            self.getProxy()
    
    #检验代理的类
    class ProxyCheck(threading.Thread):
        def __init__(self,proxyList):
            threading.Thread.__init__(self)
            self.proxyList = proxyList
            self.timeout = 5
            self.testUrl = "http://www.baidu.com/"
            self.testStr = "030173"
    
        def checkProxy(self):
            cookies = urllib2.HTTPCookieProcessor()
            for proxy in self.proxyList:
                proxyHandler = urllib2.ProxyHandler({"http" : r'http://%s:%s' %(proxy[0],proxy[1])})
                #print r'http://%s:%s' %(proxy[0],proxy[1])
                opener = urllib2.build_opener(cookies,proxyHandler)
                opener.addheaders = [('User-agent', 'Mozilla/5.0 (Windows NT 6.2; WOW64; rv:22.0) Gecko/20100101 Firefox/22.0')] 
                #urllib2.install_opener(opener)
                t1 = time.time()
    
                try:
                    #req = urllib2.urlopen("http://www.baidu.com", timeout=self.timeout)
                    req = opener.open(self.testUrl, timeout=self.timeout)
                    #print "urlopen is ok...."
                    result = req.read()
                    #print "read html...."
                    timeused = time.time() - t1
                    pos = result.find(self.testStr)
                    #print "pos is %s" %pos
    
                    if pos > 1:
                        checkedProxyList.append((proxy[0],proxy[1],proxy[2],timeused))
                        #print "ok ip: %s %s %s %s" %(proxy[0],proxy[1],proxy[2],timeused)
                    else:
                         continue
                except Exception,e:
                    #print e.message
                    continue
    
        def run(self):
            self.checkProxy()
    
    # Collect image URLs: scan the site's front page for album links,
    # then scan each album page for <img> sources, appending every image
    # URL found to the shared imgurl_list.
    def imgurlList(url_home):
        global imgurl_list
        home_page = urllib2.urlopen(url_home)
        url_re = re.compile(r'<li><a href="(.+?)" target="_blank" rel="nofollow">')
        pic_re = re.compile(r'<img src="(.*?\.\w{3,4})"')
        url_list = re.findall(url_re, home_page.read())
        for url in url_list:
            url_page = urllib2.urlopen(url_home + url)
            # BUG FIX: the loop variable was previously named "imgurlList",
            # shadowing this function's own name inside its scope.
            for imgurl in re.findall(pic_re, url_page.read()):
                imgurl_list.append(imgurl)
    
    # Worker thread: download a slice of the image URLs, each through a
    # randomly chosen validated proxy, saving to the current directory.
    class getPic(threading.Thread):
        def __init__(self, imgurl_list):
            threading.Thread.__init__(self)
            self.imgurl_list = imgurl_list
            self.timeout = 5                      # per-download timeout (seconds)

        def downloadimg(self):
            for imgurl in self.imgurl_list:
                pic_suffix = imgurl.split('.')[-1]  # keep the original file extension
                # Random name avoids collisions between concurrent threads.
                pic_name = str(random.randint(0, 10000000000)) + '.' + pic_suffix
                cookies = urllib2.HTTPCookieProcessor()
                randomCheckedProxy = random.choice(checkedProxyList)  # pick a verified proxy
                proxyHandler = urllib2.ProxyHandler({"http" : r'http://%s:%s' % (randomCheckedProxy[0], randomCheckedProxy[1])})
                opener = urllib2.build_opener(cookies, proxyHandler)
                opener.addheaders = [('User-agent', 'Mozilla/5.0 (Windows NT 6.2; WOW64; rv:22.0) Gecko/20100101 Firefox/22.0')]
                # NOTE: dropped the redundant urllib2.install_opener() call;
                # downloads go through opener.open() directly.
                try:
                    data_img = opener.open(imgurl, timeout=self.timeout)
                    # BUG FIX: "with" guarantees the file is closed even if
                    # read()/write() raises (the old open/close pair leaked).
                    with open(pic_name, 'wb') as f:
                        f.write(data_img.read())
                except Exception:
                    # BUG FIX: the bare "except:" also swallowed
                    # KeyboardInterrupt/SystemExit; failed downloads are skipped.
                    continue

        def run(self):
            self.downloadimg()
    
    if __name__ == "__main__":
        getThreads = []
        checkThreads = []
        imgurlList('http://www.ivsky.com')
        getPicThreads = []
    
    #对每个目标网站开启一个线程负责抓取代理
    for i in range(len(targets)):
        t = ProxyGet(targets[i])
        getThreads.append(t)
    
    for i in range(len(getThreads)):
        getThreads[i].start()
    
    for i in range(len(getThreads)):
        getThreads[i].join()
    
    print '.'*10+"总共抓取了%s个代理" %len(rawProxyList) +'.'*10
    
    #开启20个线程负责校验,将抓取到的代理分成20份,每个线程校验一份
    for i in range(20):
        t = ProxyCheck(rawProxyList[((len(rawProxyList)+19)/20) * i:((len(rawProxyList)+19)/20) * (i+1)])
        checkThreads.append(t)
    
    for i in range(len(checkThreads)):
        checkThreads[i].start()
    
    for i in range(len(checkThreads)):
        checkThreads[i].join()
    
    print '.'*10+"总共有%s个代理通过校验" %len(checkedProxyList) +'.'*10
    
    #开启20个线程随机取一个代理下载图片
    for i in range(20):
        t = getPic(imgurl_list[((len(imgurl_list)+19)/20) * i:((len(imgurl_list)+19)/20) * (i+1)])
        getPicThreads.append(t)
    
    for i in range(len(getPicThreads)):
        getPicThreads[i].start()
    
    for i in range(len(getPicThreads)):
        getPicThreads[i].join()
    
    print '.'*10+"总共有%s个图片下载" %len(imgurl_list) +'.'*10
    
    #代理排序持久化
    f= open("proxy_list.txt",'w+')
    for proxy in sorted(checkedProxyList,cmp=lambda x,y:cmp(x[3],y[3])):
        #print "checked proxy is: %s:%s\t%s\t%s" %(proxy[0],proxy[1],proxy[2],proxy[3])
        f.write("%s:%s\t%s\t%s\n"%(proxy[0],proxy[1],proxy[2],proxy[3]))
    f.close()
    
    测试结果:
    
    # ls
    proxy_getpic.py
    # python proxy_getpic.py
    代理服务器目标网站: http://www.cnproxy.com/proxy1.html
    代理服务器目标网站: http://www.cnproxy.com/proxy2.html
    代理服务器目标网站: http://www.cnproxy.com/proxy3.html
    代理服务器目标网站: http://www.cnproxy.com/proxy4.html
    代理服务器目标网站: http://www.cnproxy.com/proxy5.html
    代理服务器目标网站: http://www.cnproxy.com/proxy6.html
    代理服务器目标网站: http://www.cnproxy.com/proxy7.html
    代理服务器目标网站: http://www.cnproxy.com/proxy8.html
    ..........总共抓取了800个代理..........
    ..........总共有458个代理通过校验..........
    ..........总共有154个图片下载..........
    # cat proxy_list.txt | more
    173.213.113.111:3128    United States   0.432188987732
    173.213.113.111:8089    United States   0.441318035126
    173.213.113.111:7808    United States   0.444597005844
    110.4.24.170:80 香港 香港移动通讯有限公司       0.489440202713
    211.142.236.135:8080    湖南省株洲市 移动       0.490673780441
    211.142.236.135:8081    湖南省株洲市 移动       0.518096923828
    211.142.236.135:8000    湖南省株洲市 移动       0.51860499382
    211.142.236.135:8082    湖南省株洲市 移动       0.520448207855
    # ls
    1001117689.jpg  3097883176.jpg  5234319709.jpg  7012274766.jpg  8504924248.jpg
    1076458640.jpg  3144369522.jpg  5387877704.jpg  7106183143.jpg  867723868.jpg
    1198548712.jpg  3161307031.jpg  5572092752.jpg  7361254661.jpg  8746315373.jpg
    165738192.jpg   3228008315.jpg  5575388077.jpg  7389537793.jpg  8848973192.jpg
    1704512138.jpg  3306931164.jpg  5610740708.jpg  7407358698.jpg  8973834958.jpg
    1742167711.jpg  3320152673.jpg  5717429022.jpg  7561176207.jpg  8976862152.jpg
    ...............

    Fri Jul 12 11:34:12 CST 2013
    • 本文由 发表于 2013-07-12
    • 转载请务必保留本文链接:https://linuxeye.com/340.html
    腾讯云COS上传、批量删除工具(Python) 脚本

    腾讯云COS上传、批量删除工具(Python)

    腾讯云对象存储COS是类似于阿里云OSS,相比OSS,COS提供每月免费额度:存储空间50G、外网访问流量10G(内网免费)、免费读请求100万次、写请求10万次。对网站备份来说不错,但是,腾讯云提供...
    Python多线程抓取代理服务器 Linux

    Python多线程抓取代理服务器

    Python作为一门功能强大的脚本语言来说,经常被用来写爬虫程序,下面是Python爬虫多线程抓取代理服务器。 年前是用 //linuxeye.com/340.html 来抓取代理服务器的,谁知道过完...
    Python实现文件的全备份和差异备份 Linux

    Python实现文件的全备份和差异备份

    之前有写利用md5方式来做差异备份,但是这种md5方式来写存在以下问题: md5sum获取有些软连接的MD5值存在问题 不支持对空目录进行备份,因为md5sum无法获取空目录的md5值 权限的修改md...
    匿名

    发表评论

    匿名网友