CSDN blocks what it considers non-human traffic, so a plain crawler cannot fetch articles from it; you have to make the request look like it comes from a real browser.
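The core trick is simply sending browser-like request headers. A minimal sketch of the idea (the User-Agent string is illustrative; any mainstream browser UA works):

# coding=utf-8
# Minimal sketch: fetch a page while presenting a desktop-browser User-Agent.
import urllib2

req = urllib2.Request(
    'http://blog.csdn.net/nealgavin/article/details/27110717',
    headers={'User-Agent': 'Mozilla/5.0 (X11; Linux i686) AppleWebKit/535.7 '
                           '(KHTML, like Gecko) Chrome/16.0.912.77 Safari/535.7'})
html = urllib2.urlopen(req).read()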
Usage: give the script a link to a CSDN article, and it generates an HTML file of the article body, named after the article title.
Demo (Python):
#!/usr/bin/env python
# coding=utf-8
# Scrape a CSDN blog article and save its body as <title>.html.
# Python 2 script: uses urllib2, cookielib and BeautifulSoup 3.
import random
import socket
import urllib2
import re
import BeautifulSoup
import cookielib

# Canned error messages.
ERROR = {
    '0': 'Can not open the url, check your net',
    '1': 'Create download dir error',
    '2': 'The image links is empty',
    '3': 'Download failed',
    '4': 'Build soup error, the html is empty',
    '5': 'Can not save the file to your disk',
}


class BrowserBase(object):
    """Simulates a browser visit so CSDN serves the page."""

    def __init__(self):
        socket.setdefaulttimeout(20)
        self.HTML = ''
        self.articleName = ''
        self.link = ''

    def speak(self, name, content):
        print '[%s]%s' % (name, content)

    def openurl(self, url):
        """Open the page with cookie support and a random browser User-Agent."""
        cookie_support = urllib2.HTTPCookieProcessor(cookielib.CookieJar())
        self.opener = urllib2.build_opener(cookie_support, urllib2.HTTPHandler)
        urllib2.install_opener(self.opener)
        user_agents = [
            'Mozilla/5.0 (Windows; U; Windows NT 5.1; it; rv:1.8.1.11) Gecko/20071127 Firefox/2.0.0.11',
            'Opera/9.25 (Windows NT 5.1; U; en)',
            'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; .NET CLR 1.1.4322; .NET CLR 2.0.50727)',
            'Mozilla/5.0 (compatible; Konqueror/3.5; Linux) KHTML/3.5.5 (like Gecko) (Kubuntu)',
            'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.8.0.12) Gecko/20070731 Ubuntu/dapper-security Firefox/1.5.0.12',
            'Lynx/2.8.5rel.1 libwww-FM/2.14 SSL-MM/1.4.1 GNUTLS/1.2.9',
            "Mozilla/5.0 (X11; Linux i686) AppleWebKit/535.7 (KHTML, like Gecko) Ubuntu/11.04 Chromium/16.0.912.77 Chrome/16.0.912.77 Safari/535.7",
            "Mozilla/5.0 (X11; Ubuntu; Linux i686; rv:10.0) Gecko/20100101 Firefox/10.0",
        ]
        agent = random.choice(user_agents)
        # Pretend to be a real browser arriving from Google.
        self.opener.addheaders = [('User-agent', agent),
                                  ('Accept', '*/*'),
                                  ('Referer', 'http://www.google.com')]
        try:
            res = self.opener.open(url)
            self.HTML = res.read()
        except Exception, e:
            self.speak(str(e), url)
            raise  # re-raise the original error instead of a bare Exception
        else:
            return res

    def OUT(self):
        print self.HTML

    def getArticleName(self, tags):
        # Pull (css class, link, title) groups out of the title block.
        re_rules = r'<span class="(.+?)"><a href="(.+?)">(.+?)</a>'
        p = re.compile(re_rules, re.DOTALL)
        title = p.findall(str(tags))
        self.chineseListOut(title)

    def chineseListOut(self, tags):
        # Flatten the regex groups and derive the article link and file name.
        title = []
        for tag in tags:
            for ele in tag:
                title.append(ele.strip())
        self.link = 'http://blog.csdn.net' + title[1]
        # Join the title words with '-' so the name is safe as a file name.
        self.articleName = '-'.join(title[2].split())

    def buildArticleHTML(self):
        # Wrap the extracted body in a minimal UTF-8 HTML skeleton.
        self.HTML = ('<html><meta charset="utf-8"><body>'
                     + str(self.HTML) + '</body></html>')

    def getMainArticle(self):
        """Get the main article of a CSDN blog page."""
        soup = BeautifulSoup.BeautifulSoup(self.HTML)
        tags_all = soup.findAll('div', {'class': 'article_title'})
        self.getArticleName(tags_all)
        tags_all = soup.findAll('div', {'id': 'article_content',
                                        'class': 'article_content'})
        self.HTML = tags_all[0]
        self.buildArticleHTML()

    def saveArticleAsHTML(self):
        filePath = self.articleName + '.html'
        try:
            filePointer = open(filePath, 'w+')
        except IOError:
            print ERROR['5']
            print 'path = ', filePath
            return  # bail out instead of writing to an unopened file
        filePointer.write(self.HTML)
        filePointer.close()


browser = BrowserBase()
url = raw_input('Input the link of the CSDN article you need!\n').strip()
if not url:
    url = 'http://blog.csdn.net/nealgavin/article/details/27110717'
browser.openurl(url)
browser.getMainArticle()
browser.saveArticleAsHTML()
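For reference, the same class can be reused to grab several posts in one run; a small sketch (the URL list below is an illustrative placeholder, not a tested set):

# Batch use of BrowserBase: one fresh instance per article keeps state simple.
urls = [
    'http://blog.csdn.net/nealgavin/article/details/27110717',
]
for u in urls:
    b = BrowserBase()
    b.openurl(u)
    b.getMainArticle()
    b.saveArticleAsHTML()
    print 'saved:', b.articleName + '.html'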
If you scrape too frequently, the server starts returning HTTP 429 (Too Many Requests); at that point you need to switch to a different proxy IP. A high-anonymity crawler proxy service such as Abuyun (阿布云) is one option.
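A rough sketch of wiring a proxy into the same urllib2 flow and backing off on 429 (the proxy host and credentials are placeholders, not a real endpoint; substitute whatever your proxy provider gives you):

# Sketch: route requests through an HTTP proxy and retry on 429.
import time
import urllib2

# Placeholder credentials/host -- not a real proxy endpoint.
proxy = urllib2.ProxyHandler({'http': 'http://user:pass@proxy.example.com:9020'})
opener = urllib2.build_opener(proxy)

def fetch(url, retries=3):
    for _ in range(retries):
        try:
            return opener.open(url, timeout=20).read()
        except urllib2.HTTPError, e:
            if e.code == 429:      # rate-limited: wait, then try again
                time.sleep(5)      # (or rotate to another proxy IP here)
                continue
            raise
    return None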