Multithreaded crawler: a crawler in which certain parts of the program run in parallel. Setting up the threads sensibly can make the crawler noticeably more efficient.
Below are a plain (single-threaded) crawler and a multithreaded crawler for Qiushibaike jokes.
Analyzing the site's URLs shows that the listing pages follow this pattern:
https://www.qiushibaike.com/8hr/page/<page number>/
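For example, the listing-page URLs can be generated straight from this pattern; a minimal sketch (the page range here is arbitrary, just for illustration):

# Build a few listing-page URLs from the pattern above; the range is only illustrative.
for page in range(1, 4):
    print("https://www.qiushibaike.com/8hr/page/" + str(page) + "/")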
Multithreading here works much the same as multithreading in Java, so let's go straight to the code.
'''
# Plain (single-threaded) crawler
import urllib.request
import urllib.error
import re

headers = ("User-Agent", "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.87 Safari/537.36")
opener = urllib.request.build_opener()
opener.addheaders = [headers]
urllib.request.install_opener(opener)

for i in range(1, 2):
    url = "https://www.qiushibaike.com/8hr/page/" + str(i) + "/"
    pagedata = urllib.request.urlopen(url).read().decode("utf-8", "ignore")
    pattern = '<div class="content">.*?<span>(.*?)</span>(.*?)</div>'
    datalist = re.compile(pattern, re.S).findall(pagedata)
    for j in range(0, len(datalist)):
        print("Page " + str(i) + ", joke " + str(j) + ":")
        print(datalist[j])
'''

'''
# Introductory multithreading example
import threading                        # import the threading module

class A(threading.Thread):              # define thread class A
    def __init__(self):                 # required method 1: initialize the thread
        threading.Thread.__init__(self)
    def run(self):                      # required method 2: the work the thread performs
        for i in range(0, 11):
            print("I am thread A")

class B(threading.Thread):              # define thread class B
    def __init__(self):
        threading.Thread.__init__(self)
    def run(self):
        for i in range(0, 11):
            print("I am thread B")

t1 = A()      # instantiate the thread
t1.start()    # start the thread
t2 = B()
t2.start()
'''

# The revised multithreaded crawler:
# one thread crawls the odd-numbered pages, the other the even-numbered pages
import urllib.request
import urllib.error
import re
import threading

headers = ("User-Agent", "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.87 Safari/537.36")
opener = urllib.request.build_opener()
opener.addheaders = [headers]
urllib.request.install_opener(opener)

class one(threading.Thread):   # crawls the odd-numbered pages
    def __init__(self):
        threading.Thread.__init__(self)
    def run(self):
        for i in range(1, 12, 2):
            url = "https://www.qiushibaike.com/8hr/page/" + str(i) + "/"
            pagedata = urllib.request.urlopen(url).read().decode("utf-8", "ignore")
            pattern = '<div class="content">.*?<span>(.*?)</span>(.*?)</div>'
            datalist = re.compile(pattern, re.S).findall(pagedata)
            for j in range(0, len(datalist)):
                print("Page " + str(i) + ", joke " + str(j) + ":")
                print(datalist[j])

class two(threading.Thread):   # crawls the even-numbered pages
    def __init__(self):
        threading.Thread.__init__(self)
    def run(self):
        for i in range(2, 12, 2):
            url = "https://www.qiushibaike.com/8hr/page/" + str(i) + "/"
            pagedata = urllib.request.urlopen(url).read().decode("utf-8", "ignore")
            pattern = '<div class="content">.*?<span>(.*?)</span>(.*?)</div>'
            datalist = re.compile(pattern, re.S).findall(pagedata)
            for j in range(0, len(datalist)):
                print("Page " + str(i) + ", joke " + str(j) + ":")
                print(datalist[j])

t1 = one()
t2 = two()
t1.start()
t2.start()
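A small note on the script above: because neither thread is joined, the main code finishes right after the two start() calls, and the process simply waits for the non-daemon worker threads to complete. If you wanted to do something after both pages sets are crawled (print a summary, write a file), add t1.join() and t2.join() at the end.

A more compact way to spread the pages over a fixed number of worker threads is the standard-library concurrent.futures.ThreadPoolExecutor. The sketch below is not the original author's code; it assumes the same URL pattern and content regex as above, and the helper name crawl_page and the worker count are arbitrary choices for illustration.

# Sketch: the same page range crawled through a thread pool (assumed regex and URL pattern).
import re
import urllib.request
from concurrent.futures import ThreadPoolExecutor

HEADERS = {"User-Agent": "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 "
                         "(KHTML, like Gecko) Chrome/55.0.2883.87 Safari/537.36"}
PATTERN = re.compile('<div class="content">.*?<span>(.*?)</span>(.*?)</div>', re.S)

def crawl_page(i):
    # Fetch one listing page and return its page number plus the extracted jokes.
    url = "https://www.qiushibaike.com/8hr/page/" + str(i) + "/"
    req = urllib.request.Request(url, headers=HEADERS)
    pagedata = urllib.request.urlopen(req).read().decode("utf-8", "ignore")
    return i, PATTERN.findall(pagedata)

# Two workers roughly reproduce the odd/even split; raising max_workers fetches more pages in parallel.
with ThreadPoolExecutor(max_workers=2) as pool:
    for page, jokes in pool.map(crawl_page, range(1, 12)):
        for j, joke in enumerate(jokes):
            print("Page " + str(page) + ", joke " + str(j) + ":")
            print(joke)

One design point: pool.map returns results in page order regardless of which thread finished first, whereas the two hand-written thread classes interleave their output as they run.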
That is everything in this article on a practical Python multithreaded crawler for scraping Qiushibaike jokes. I hope it serves as a useful reference, and thank you for supporting 服务器之家.
Original article: http://www.cnblogs.com/Liuyt-61/archive/2017/12/14/8040238.html