Preface
On the choice of Python version: at first a lot of what I read recommended Python 2, on the grounds that many libraries did not yet support 3. Having used it up to now, though, I find Python 3 more convenient, mainly because of encoding issues; 2 is simply less pleasant than 3 in that respect. Besides, most of the Python 2 material found online still works after minor changes.
OK, on to crawling Baidu Baike.
The task here is to crawl the full information for n scenic spots in the Beijing area, where the names of the n spots are given in a file. No API is used; we simply scrape the web pages.
1. Build the URL from a keyword
Since we only need to fetch information, with no interaction involved, a simple approach is enough; there is no need to simulate a browser.
You can use the URL template directly:
```
http://baike.baidu.com/search/word?word="guanjianci"
```
```python
for l in view_names:
    # how the URL is built: http://baike.baidu.com/search/word?word=<keyword>
    name = urllib.parse.quote(l)
    url = 'http://baike.baidu.com/search/word?word=' + name
```
Note that the keywords are Chinese, so encoding is an issue: a URL cannot contain spaces (or non-ASCII characters), so the keyword has to be run through the quote() function first.
About quote():
In Python 2.x the call is urllib.quote(text); in Python 3.x it is urllib.parse.quote(text). By the standard, a URL may contain only a subset of ASCII characters (letters, digits, and some punctuation); other characters, Chinese included, are not URL-legal and therefore have to be URL-encoded. The part of a URL that carries parameters (the query string) has the form name1=value1&name2=value2&name3=value3, so if a name or value itself contains a character such as '&' or '=', things naturally break; those characters have to be encoded as well. URL encoding turns each byte that needs encoding into the form %xx, and is usually based on UTF-8 (although this depends on the browser and platform).
An example: the character '我' has Unicode code point 0x6211 and UTF-8 encoding 0xE6 0x88 0x91, so its URL-encoded form is %E6%88%91.
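A quick check of this in Python 3 (output shown in the comments):

```python
from urllib.parse import quote, urlencode

print(quote('我'))                  # %E6%88%91 -- the UTF-8 bytes E6 88 91, percent-encoded
print(urlencode({'word': '北京'}))  # word=%E5%8C%97%E4%BA%AC -- a whole query-string parameter
```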
Python's urllib provides two functions for this, quote and quote_plus, which differ in the range of characters they encode. There is no need to dig into the details here; quote is enough for our purposes.
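For the curious, the practical difference is that quote() leaves '/' unencoded by default, while quote_plus() encodes it and turns spaces into '+':

```python
from urllib.parse import quote, quote_plus

print(quote('a b/c'))       # a%20b/c
print(quote_plus('a b/c'))  # a+b%2Fc
```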
2. Download the page
This is easy with the urllib library; see def download(self, url) in the code below.
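As a minimal sketch, this is all the method does (it mirrors download() in the full code below):

```python
import urllib.request

def download(url):
    if url is None:
        return None
    response = urllib.request.urlopen(url)
    if response.getcode() != 200:  # treat anything but HTTP 200 as a failure
        return None
    return response.read()         # the raw bytes of the page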
3. Get the HTML into BeautifulSoup
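A minimal sketch of this step, using 颐和园 (the Summer Palace) as a hypothetical keyword; the class names are the ones the full code below relies on, and Baike's markup may of course change over time:

```python
import urllib.request
from bs4 import BeautifulSoup

# URL built as in step 1 (quote('颐和园') == '%E9%A2%90%E5%92%8C%E5%9B%AD')
url = 'http://baike.baidu.com/search/word?word=%E9%A2%90%E5%92%8C%E5%9B%AD'
html_count = urllib.request.urlopen(url).read()
soup = BeautifulSoup(html_count, 'html.parser')

print(soup.find('div', class_="main-content").find('h1').get_text())  # spot name
print(soup.find('div', class_="lemma-summary").get_text())            # summary
```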
4. Analyze the data
The content of a Baike page is a series of parallel, same-level sections, so while crawling you cannot store it with a natural section hierarchy (everything sits at the same level); a regex-based approach is needed.
The basic idea is to treat the whole HTML document as a str, use a regular expression to cut out the piece you want, turn that piece back into a BeautifulSoup object, and then process it further (sketched below).
It may be worth spending some time brushing up on regular expressions.
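Here is that pattern as a sketch. HEAD_PATTERN and TAIL_PATTERN are placeholders for the HTML tags that bound the region you want; the concrete patterns depend on the page markup:

```python
import re
from bs4 import BeautifulSoup

HEAD_PATTERN = r''  # placeholder: tag that opens the content region
TAIL_PATTERN = r''  # placeholder: tag that closes the content region

def cut_and_resoup(soup):
    text = str(soup)                               # treat the whole document as one str
    head = re.compile(HEAD_PATTERN, re.DOTALL).search(text)
    tail = re.compile(TAIL_PATTERN, re.DOTALL).search(text)
    fragment = text[head.end():tail.start()]       # keep only what lies in between
    return BeautifulSoup(fragment, 'html.parser')  # back to a soup for further parsing
```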
There are plenty of smaller details in the code; when I forget them I'll just have to look them up again. Next time I really should write the documentation while coding, or at least right after finishing...
Here's the code!
```python
# coding:utf-8
'''
function: crawl Baidu Baike pages for Beijing scenic spots
author: yi
'''
import urllib.request
from urllib.request import urlopen
from urllib.error import HTTPError
import urllib.parse
from bs4 import BeautifulSoup
import re
import codecs
import json


class BaikeCraw(object):
    def __init__(self):
        self.urls = set()
        self.view_datas = {}

    def craw(self, filename):
        urls = self.getUrls(filename)
        if urls is None:
            print("not found")
        else:
            for urll in urls:
                print(urll)
                try:
                    html_count = self.download(urll)
                    self.passer(urll, html_count)
                except:
                    print("view does not exist")
                '''file = self.view_datas["view_name"]
                self.craw_pic(urll, file, html_count)
                print(file)'''

    def getUrls(self, filename):
        # read the spot names from the file (whitespace-separated)
        new_urls = set()
        file_object = codecs.open(filename, encoding='utf-16')
        try:
            all_text = file_object.read()
        except:
            print("failed to read the file!")
        file_object.close()
        view_names = all_text.split(" ")
        view_names = [l for l in view_names if '?' not in l]  # drop malformed names
        for l in view_names:
            # how the URL is built: http://baike.baidu.com/search/word?word=<keyword>
            name = urllib.parse.quote(l)
            url = 'http://baike.baidu.com/search/word?word=' + name
            new_urls.add(url)
        print(new_urls)
        return new_urls

    def manger(self):
        pass

    def passer(self, urll, html_count):
        soup = BeautifulSoup(html_count, 'html.parser', from_encoding='utf_8')
        self._get_new_data(urll, soup)
        return

    def download(self, url):
        if url is None:
            return None
        response = urllib.request.urlopen(url)
        if response.getcode() != 200:
            return None
        return response.read()

    def _get_new_data(self, url, soup):  # extract the data
        if soup.find('div', class_="main-content").find('h1') is not None:
            self.view_datas["view_name"] = soup.find('div', class_="main-content").find('h1').get_text()  # spot name
            print(self.view_datas["view_name"])
        else:
            self.view_datas["view_name"] = soup.find("div", class_="feature_poster").find("h1").get_text()
        self.view_datas["view_message"] = soup.find('div', class_="lemma-summary").get_text()  # summary
        self.view_datas["basic_message"] = soup.find('div', class_="basic-info cmn-clearfix").get_text()  # basic info
        self.view_datas["basic_message"] = self.view_datas["basic_message"].split("\n")
        self.view_datas["basic_message"] = [line for line in self.view_datas["basic_message"] if line != ""]
        # the basic-info box alternates key lines and value lines: pair them up
        i = 1
        get2 = []
        tmp = "%%"
        for line in self.view_datas["basic_message"]:
            if i % 2 == 1:
                tmp = line
            else:
                get2.append(tmp + ":" + line)
            i = i + 1
        self.view_datas["basic_message"] = get2
        self.view_datas["catalog"] = soup.find('div', class_="lemma-catalog").get_text().split("\n")  # table of contents
        self.view_datas["catalog"] = [line for line in self.view_datas["catalog"] if line != ""]

        # ---------- the article body ----------
        view_name = self.view_datas["view_name"]
        html = urllib.request.urlopen(url)
        soup2 = BeautifulSoup(html.read(), 'html.parser').decode('utf-8')
        # NOTE: the regex patterns and split delimiters below were HTML tags marking
        # the section boundaries; they were stripped when this post was published,
        # so the empty strings here are placeholders.
        content_data_node = soup2  # placeholder: the step narrowing this to the content region was lost
        p = re.compile(r'', re.DOTALL)  # tail: end of the content region
        r = p.search(content_data_node)
        content_data = content_data_node[0:r.span(0)[0]]
        lists = content_data.split('')  # split into top-level sections
        i = 1
        for block in lists:  # each major section
            final_soup = BeautifulSoup(block, "html.parser")
            name_list = None
            try:
                part_name = final_soup.find('h2', class_="title-text").get_text().replace(view_name, '').strip()
                # drop the spot name, the section title, and the '编辑' (edit) link text,
                # leaving a section such as "历史沿革" (history)
                part_data = final_soup.get_text().replace(view_name, ' ').replace(part_name, ' ').replace('编辑', ' ')
                name_list = final_soup.findAll('h3', class_="title-text")
                all_name_list = {}
                na = "part_name" + str(i)
                all_name_list[na] = part_name
                final_name_list = []
                for nlist in name_list:
                    nlist = nlist.get_text().replace(view_name, '').strip()
                    final_name_list.append(nlist)
                fin = "final_name_list" + str(i)
                all_name_list[fin] = final_name_list
                print(all_name_list)
                i = i + 1
                # the body text
                try:
                    p = re.compile(r'', re.DOTALL)  # placeholder, see NOTE above
                    final_soup = final_soup.decode('utf-8')
                    r = p.search(final_soup)
                    final_part_data = final_soup[r.span(0)[0]:]
                    part_lists = final_part_data.split('')
                    for part_list in part_lists:
                        final_part_soup = BeautifulSoup(part_list, "html.parser")
                        content_lists = final_part_soup.findAll("div", class_="para")
                        for content_list in content_lists:  # each smallest paragraph
                            try:
                                pic_word = content_list.find("div", class_="lemma-picture text-pic layout-right").get_text()  # strip image captions from the text
                                try:
                                    pic_word2 = content_list.find("div", class_="description").get_text()
                                    content_list = content_list.get_text().replace(pic_word, ' ').replace(pic_word2, ' ')
                                except:
                                    content_list = content_list.get_text().replace(pic_word, '')
                            except:
                                try:
                                    pic_word2 = content_list.find("div", class_="description").get_text()
                                    content_list = content_list.get_text().replace(pic_word2, '')
                                except:
                                    content_list = content_list.get_text()
                            r_part = re.compile(r'\[\d.\]|\[\d\]')  # strip reference marks like [1]
                            part_result, number = re.subn(r_part, "", content_list)
                            part_result = "".join(part_result.split())
                            # print(part_result)
                except:
                    # sections without sub-headings: parse the block directly
                    final_part_soup = BeautifulSoup(block, "html.parser")
                    content_lists = final_part_soup.findAll("div", class_="para")
                    for content_list in content_lists:
                        try:
                            pic_word = content_list.find("div", class_="lemma-picture text-pic layout-right").get_text()  # strip image captions from the text
                            try:
                                pic_word2 = content_list.find("div", class_="description").get_text()
                                content_list = content_list.get_text().replace(pic_word, ' ').replace(pic_word2, ' ')
                            except:
                                content_list = content_list.get_text().replace(pic_word, '')
                        except:
                            try:
                                pic_word2 = content_list.find("div", class_="description").get_text()
                                content_list = content_list.get_text().replace(pic_word2, '')
                            except:
                                content_list = content_list.get_text()
                        r_part = re.compile(r'\[\d.\]|\[\d\]')
                        part_result, number = re.subn(r_part, "", content_list)
                        part_result = "".join(part_result.split())
                        # print(part_result)
            except:
                print("error")
        return

    def output(self, filename):
        json_data = json.dumps(self.view_datas, ensure_ascii=False, indent=2)
        fout = codecs.open(filename + '.json', 'a', encoding='utf-16')
        fout.write(json_data)
        # print(json_data)
        return

    def craw_pic(self, url, filename, html_count):
        soup = BeautifulSoup(html_count, 'html.parser', from_encoding='utf_8')
        node_pic = soup.find('div', class_='banner').find("a", href=re.compile(r"/photo/poi/....\."))
        if node_pic is None:
            return None
        else:
            part_url_pic = node_pic['href']
            full_url_pic = urllib.parse.urljoin(url, part_url_pic)
            # print(full_url_pic)
        try:
            html_pic = urlopen(full_url_pic)
        except HTTPError:
            return None
        soup_pic = BeautifulSoup(html_pic.read(), 'html.parser')
        pic_node = soup_pic.find('div', class_="album-list")
        print(pic_node)
        return


if __name__ == "__main__":
    spider = BaikeCraw()
    filename = "D:\\PyCharm\\view_spider\\view_points_part.txt"
    spider.craw(filename)
```
Summary
That's basically it for crawling Baidu Baike content by keyword with Python 3; I hope this article is of some help to everyone learning Python.