(转)python爬虫实战——5分钟做个图片(表情包)自动下载器

http://www.jianshu.com/p/19c846daccb3

用这个下载了好多表情包。。。。。。。 先下载了300个表情包。。。。。 网盘地址 http://pan.baidu.com/s/1pKTKkGz


#_*_ coding:utf-8 _*_

import os
import re
from urllib.parse import quote

import requests

def downloadPic(html, keyword):
    """Download every image referenced by an ``"objURL"`` entry in a Baidu
    image-search results page.

    Parameters
    ----------
    html : str
        Raw HTML of a Baidu image-search ("flip") results page.
    keyword : str
        The search keyword; used for log messages and to name the saved
        files as ``pictures/<keyword>_<i>.jpg``.

    Notes
    -----
    Images that fail to download are skipped and do not consume an index
    number, matching the original behavior.
    """
    # Baidu embeds the original image URL as "objURL":"..." in the page source.
    pic_url = re.findall('"objURL":"(.*?)",', html, re.S)

    print('找到关键词:' + keyword + '的图片,现在开始下载图片...')

    # The original assumed a pre-existing "pictures" directory and crashed
    # otherwise; create it up front so the script works from a fresh start.
    os.makedirs('pictures', exist_ok=True)

    i = 0
    for each in pic_url:
        print('正在下载第' + str(i + 1) + '张图片,图片地址:' + str(each))
        try:
            # RequestException covers timeouts, DNS failures, bad URLs and
            # connection resets — the original caught only ConnectionError,
            # so a Timeout (likely, given timeout=10) killed the whole run.
            pic = requests.get(each, timeout=10)
        except requests.exceptions.RequestException:
            print('错误当前图片无法下载')
            continue

        # os.path.join is portable; the original hard-coded a Windows
        # backslash and re-encoded the path to cp936 (a Python 2-only hack
        # that raises AttributeError on Python 3 str).
        path = os.path.join('pictures', keyword + '_' + str(i) + '.jpg')
        # Context manager guarantees the file is closed even if write fails.
        with open(path, 'wb') as fp:
            fp.write(pic.content)
        i += 1


if __name__ == '__main__':
    # Python 3: input() replaces Python 2's raw_input().
    word = input("Input key word:")
    # Percent-encode the keyword: raw spaces or CJK characters would
    # otherwise yield an invalid or terminal-encoding-dependent URL.
    # (Pure-ASCII keywords produce the same URL as before.)
    url = ('http://image.baidu.com/search/flip?tn=baiduimage&ie=utf-8&word='
           + quote(word) + '&ct=201326592&v=flip')
    # Explicit timeout so the script cannot hang forever on a dead server.
    result = requests.get(url, timeout=10)
    downloadPic(result.text, word)







Loading Disqus comments...
Table of Contents