The overall approach looks like this:
```python
# Use the requests module to fetch the page, and lxml's html module to build
# a selector from the response (i.e. parse the response into an element tree):
#
# from lxml import html
# import requests
#
# response = requests.get(url).content
# selector = html.fromstring(response)
# hrefs = selector.xpath("/html/body//div[@class='feed-item _j_feed_item']/a/@href")
#
# We use url = 'https://www.mafengwo.cn/gonglve/ziyouxing/2033.html' as the example.
```
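If lxml is new to you, here is a minimal, self-contained sketch of the `fromstring`/`xpath` pattern used throughout this post; the HTML fragment is made up purely for illustration:

```python
from lxml import html

# a made-up fragment that mimics the structure the crawler targets
doc = html.fromstring(
    "<div class='feed-item _j_feed_item'><a href='/page/1.html'>post</a></div>")
# xpath() returns a list of matches; the trailing @href selects the attribute value
print doc.xpath("//div[@class='feed-item _j_feed_item']/a/@href")
# -> ['/page/1.html']
```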
```python
# python 2.7
import requests
from lxml import html
import os
```
```python
# Collect the urls of the child pages from the index page
def get_page_urls(url):
    response = requests.get(url).content
    # build the selector with lxml's html module
    selector = html.fromstring(response)
    urls = []
    for i in selector.xpath(
            "/html/body//div[@class='feed-item _j_feed_item']/a/@href"):
        urls.append(i)
    return urls
```
```python
# get the title from a child page's html (div[@class='title'])
def get_page_a_title(url):
    '''url is one a@href from the ziyouxing index page'''
    response = requests.get(url).content
    selector = html.fromstring(response)
    # xpath found with chrome's devtools --> /html/body//div[@class='title']/text()
    a_title = selector.xpath("/html/body//div[@class='title']/text()")
    return a_title
```
```python
# Build a page selector (via lxml's html module)
def get_selector(url):
    response = requests.get(url).content
    selector = html.fromstring(response)
    return selector
```
After analyzing the html structure with Chrome's developer tools, we find that the text content we need is displayed mainly in div[@class='l-topic'] and div[@class='p-section'].
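Roughly, the relevant part of the page looks like the fragment below; the snippet is an illustrative reconstruction, not the exact mafengwo markup:

```python
from lxml import html

# illustrative fragment only; on the real page these divs sit much deeper in the tree
page = html.fromstring("""
<div class='l-topic'><p>Guide title</p></div>
<div class='p-section'>First paragraph of the guide.</div>
<div class='p-section'>Second paragraph.</div>
""")
print page.xpath("//div[@class='l-topic']/p/text()")  # -> ['Guide title']
print page.xpath("//div[@class='p-section']/text()")  # -> both paragraphs
```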
```python
# Extract the text content we need
def get_page_content(selector):
    # /html/body/div[2]/div[2]/div[1]/div[@class='l-topic']/p/text()
    page_title = selector.xpath("//div[@class='l-topic']/p/text()")
    # /html/body/div[2]/div[2]/div[1]/div[2]/div[15]/div[@class='p-section']/text()
    page_content = selector.xpath("//div[@class='p-section']/text()")
    return page_title, page_content
```
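Keep in mind that xpath() returns a list of text nodes, not a single string, so the two results must be joined before they can be concatenated or written to a file; the entry point at the end of the post does exactly this. A tiny sketch with made-up values:

```python
# example values standing in for real xpath() results
page_title = ['A weekend in Chengdu']  # from div[@class='l-topic']
page_content = ['First paragraph.', 'Second paragraph.']  # from div[@class='p-section']
result = '\n'.join(page_title) + '\n' + '\n'.join(page_content) + '\n\n'
print result
```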
```python
# Collect the image urls on the page
def get_image_urls(selector):
    imagesrcs = selector.xpath("//img[@class='_j_lazyload']/@src")
    return imagesrcs
```
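The class name `_j_lazyload` hints that these images are lazy-loaded, and lazy-loading pages often keep the real url in a data-* attribute until a script swaps it into src. If the src values come back empty or as placeholders, a hedged fallback could look like this; the `data-src` attribute name is an assumption, so confirm it in devtools:

```python
def get_image_urls_with_fallback(selector):
    # try the plain src first, then fall back to the lazy-load attribute
    imagesrcs = selector.xpath("//img[@class='_j_lazyload']/@src")
    if not imagesrcs:
        # NOTE: 'data-src' is a guess; inspect the page to confirm the attribute name
        imagesrcs = selector.xpath("//img[@class='_j_lazyload']/@data-src")
    return imagesrcs
```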
```python
# Get the title of an image
def get_image_title(selector, num):
    # num starts from 2 on this page
    path = ("/html/body/div[2]/div[2]/div[1]/div[2]/div[" + str(num) +
            "]/span[@class='img-an']/text()")
    titles = selector.xpath(path)  # xpath() returns a list, never None
    if titles:
        image_title = titles[0]
    else:
        image_title = "map" + str(num)  # no title found, so make one up
    return image_title
```
```python
# Download the images
def downloadimages(selector, number):
    '''number counts the pages and names the result directory'''
    urls = get_image_urls(selector)
    num = 2  # matches get_image_title's positional index
    amount = len(urls)
    directory = "/home/WorkSpace/tour/words/result" + str(number)
    if not os.path.exists(directory):
        os.makedirs(directory)  # create the directory, not the file
    for url in urls:
        image_title = get_image_title(selector, num)
        filename = directory + "/" + image_title + ".jpg"
        print 'downloading %s image %s' % (number, image_title)
        with open(filename, 'wb') as f:
            f.write(requests.get(url).content)
        num += 1
        print "%s of %s images downloaded" % (num - 2, amount)
```
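One possible refinement: requests.get(url).content buffers the whole image in memory before writing it. For large files you could stream the body in chunks instead; stream=True and iter_content() are standard requests features, and this helper is just a sketch:

```python
def download_image(url, filename):
    # stream the response so a large file is never held in memory all at once
    response = requests.get(url, stream=True)
    with open(filename, 'wb') as f:
        for chunk in response.iter_content(chunk_size=8192):
            if chunk:  # skip keep-alive chunks
                f.write(chunk)
```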
```python
# Entry point: start the crawl and save the collected data to files
if __name__ == '__main__':
    url = 'https://www.mafengwo.cn/gonglve/ziyouxing/2033.html'
    urls = get_page_urls(url)
    number = 1
    for i in urls:
        selector = get_selector(i)
        # download the images of this page
        downloadimages(selector, number)
        # get the text and write it into a file
        page_title, page_content = get_page_content(selector)
        # xpath() returned lists, so join them before concatenating
        result = '\n'.join(page_title) + '\n' + '\n'.join(page_content) + '\n\n'
        path = "/home/WorkSpace/tour/words/result" + str(number) + "/"
        if not os.path.exists(path):
            os.makedirs(path)
        filename = path + str(number) + ".txt"
        with open(filename, 'wb') as f:
            f.write(result.encode('utf-8'))  # encode for the binary handle
        print result
        number += 1  # next page gets its own result directory
```
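A final caveat: a tight loop of requests like the one above can get a crawler throttled or blocked. A hedged courtesy wrapper; the User-Agent string and the delay are arbitrary illustrative choices, not anything the site documents:

```python
import time

def polite_get(url, delay=1.0):
    # identify the client and pause between requests;
    # both values are assumptions to tune for the target site
    headers = {'User-Agent': 'Mozilla/5.0 (tour-notes crawler)'}
    time.sleep(delay)
    return requests.get(url, headers=headers).content
```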
That concludes the crawler. Always analyze the html structure carefully before crawling a page; some pages are generated by js. This one is fairly simple and needs no js handling, but a later post will cover that case.