This section discusses multithreading and multiprocessing in Python.
I do not use threads or processes heavily myself, so what follows is only a brief look at two typical use cases.
1. Parallel downloading with multiple threads
Python's multithreading is not true parallelism: because of the GIL (explained in section 2), threads suit I/O-bound work such as downloading, not CPU-bound work.
The following program is a multithreaded crawler.
It is for learning and exchange only; any other use is strictly prohibited!
#!/usr/bin/python
# coding=utf-8
import requests            # HTTP requests
import os                  # file-system operations
import bs4
from bs4 import BeautifulSoup
import random
import time
from threading import Thread
# User-Agent pool; the more entries, the better the disguise
meizi_headers = [
"Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/35.0.1916.153 Safari/537.36",
"Mozilla/5.0 (Windows NT 6.1; WOW64; rv:30.0) Gecko/20100101 Firefox/30.0",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_2) AppleWebKit/537.75.14 (KHTML, like Gecko) Version/7.0.3 Safari/537.75.14",
"Mozilla/5.0 (compatible; MSIE 10.0; Windows NT 6.2; Win64; x64; Trident/6.0)",
'Mozilla/5.0 (Windows; U; Windows NT 5.1; it; rv:1.8.1.11) Gecko/20071127 Firefox/2.0.0.11',
'Opera/9.25 (Windows NT 5.1; U; en)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; .NET CLR 1.1.4322; .NET CLR 2.0.50727)',
'Mozilla/5.0 (compatible; Konqueror/3.5; Linux) KHTML/3.5.5 (like Gecko) (Kubuntu)',
'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.8.0.12) Gecko/20070731 Ubuntu/dapper-security Firefox/1.5.0.12',
'Lynx/2.8.5rel.1 libwww-FM/2.14 SSL-MM/1.4.1 GNUTLS/1.2.9',
"Mozilla/5.0 (X11; Linux i686) AppleWebKit/535.7 (KHTML, like Gecko) Ubuntu/11.04 Chromium/16.0.912.77 Chrome/16.0.912.77 Safari/535.7",
"Mozilla/5.0 (X11; Ubuntu; Linux i686; rv:10.0) Gecko/20100101 Firefox/10.0",
'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2840.99 Safari/537.36',
'Mozilla/5.0 (Linux; Android 6.0; Nexus 5 Build/MRA58N) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.87 Mobile Safari/537.36'
]
# Give each request a random User-Agent to mimic a real browser
headers = {'User-Agent': random.choice(meizi_headers)}
# Site to crawl
mziTu = 'https://www.2meinv.com/'
# Where downloaded images are stored (raw string so the backslash stays literal)
save_path = r'F:\BeautifulPictures'
# Create the folder if it does not exist, then switch the working directory into it
def createFile(file_path):
    if not os.path.exists(file_path):
        os.makedirs(file_path)
    os.chdir(file_path)
# Download every gallery linked from one index page
def download(page_no, file_path):
    global headers
    res_sub = requests.get(page_no, headers=headers)
    # Parse the HTML of the index page
    soup_sub = BeautifulSoup(res_sub.text, 'html.parser')
    # Collect the gallery links on this page
    all_a = soup_sub.find('ul', class_='detail-list').find_all('a', target='_blank')
    count = 0
    for a in all_a:
        count = count + 1
        # Rotate the User-Agent on every second link
        if (count % 2) == 0:
            headers = {'User-Agent': random.choice(meizi_headers)}
        print("Link number on this page: " + str(count))
        # Extract the gallery URL from the href attribute
        href = a.attrs['href']
        print("Gallery URL: " + href)
        res_sub_1 = requests.get(href, headers=headers)
        soup_sub_1 = BeautifulSoup(res_sub_1.text, 'html.parser')
        # ------ exception handling is advisable here ------
        try:
            # Read the image count from the pager; the index of the
            # "last page" link shifts by one when a "<< 上一篇"
            # (previous gallery) link is present
            page_links = soup_sub_1.find('div', class_='page-show').find_all('a')
            k = 7 if page_links[0].text == '<< 上一篇' else 6
            pic_max = page_links[k].text
            print("Images in this gallery: " + pic_max)
            for j in range(1, int(pic_max) + 1):
                # Sleep a random 1-3 seconds between requests
                time.sleep(random.randint(1, 3))
                headers = {'User-Agent': random.choice(meizi_headers)}
                # Image pages follow the pattern <gallery>-<j>.html
                href_sub = href[:-5] + "-" + str(j) + ".html"
                print("Image page URL: " + href_sub)
                res_sub_2 = requests.get(href_sub, headers=headers)
                soup_sub_2 = BeautifulSoup(res_sub_2.text, "html.parser")
                img = soup_sub_2.find('div', class_='pp hh').find('img')
                if isinstance(img, bs4.element.Tag):
                    # Extract the image src and derive a file name from it
                    url = img.attrs['src']
                    file_name = url.split('/')[-1]
                    # Add a Referer header to defeat hotlink protection
                    headers = {'User-Agent': random.choice(meizi_headers), 'Referer': url}
                    img = requests.get(url, headers=headers)
                    print('Saving image', img)
                    with open(file_path + '/' + file_name, 'wb') as f:
                        f.write(img.content)
                    print(file_name, 'saved!')
except Exception as e:
print(e)
# Main worker: crawl index page i
def main(i):
if i == 1:
page = mziTu
else:
page = mziTu + 'index-' + str(i) + '.html'
file = save_path + '\\' + str(i)
createFile(file)
    # Download every gallery on this index page
    print("Index page URL: " + page)
download(page, file)
class spider(Thread):
    def __init__(self, page):
        Thread.__init__(self)
        self.page = page

    def run(self):
        main(self.page)
'''
Each thread crawls one index page; each index page contains several galleries.
'''
if __name__ == '__main__':
    # 10 batches of 10 threads each: 100 index pages in total
    for e in range(10):
        threads = []
        for i in range(1 + e * 10, e * 10 + 11):  # 10 pages per batch
            t = spider(i)
            threads.append(t)
            t.start()
        for t in threads:
            t.join()
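Hand-rolled batches of Thread objects work, but the standard library's concurrent.futures module offers a tidier pool-based pattern for the same job. Below is a minimal sketch that reuses the main(i) function above; the pool size and the 100-page count are illustrative choices, not part of the original program.

from concurrent.futures import ThreadPoolExecutor

if __name__ == '__main__':
    # A fixed pool of 10 worker threads processes all 100 index
    # pages; the pool handles batching and joining for us.
    with ThreadPoolExecutor(max_workers=10) as pool:
        pool.map(main, range(1, 101))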
2. Handling CPU-bound work with multiple processes
Because of Python's global interpreter lock (GIL), only one thread can execute bytecode in the interpreter at any given moment, so Python threads are ill-suited to CPU-bound tasks.
For CPU-bound work, use the multiprocessing model instead: each process gets its own interpreter and its own GIL.
The following program compares threads and processes on the same workload (searching a large range for Armstrong numbers). On my machine:
multiprocessing: 20.910259008407593 s (faster)
multithreading: 35.15505409240723 s (slower)
from threading import Thread
from multiprocessing import Process
import time
# An Armstrong (narcissistic) number equals the sum of its digits,
# each raised to the power of the digit count: 153 = 1**3 + 5**3 + 3**3
def isArmstrong(n):
    a, t = [], n
    while t > 0:
        a.append(t % 10)
        t //= 10
    k = len(a)
    return sum(x ** k for x in a) == n
# Print all Armstrong numbers in [a, b)
def findArmstrong(a, b):
    print(a, b)
    res = [k for k in range(a, b) if isArmstrong(k)]
    print('%s ~ %s: %s' % (a, b, res))
# Start one thread per (a, b) range, then wait for all of them
def findByThread(*argslist):
    workers = []
    for args in argslist:
        worker = Thread(target=findArmstrong, args=args)
        workers.append(worker)
        worker.start()
    for worker in workers:
        worker.join()
# Start one process per (a, b) range, then wait for all of them
def findByProcess(*argslist):
    workers = []
    for args in argslist:
        worker = Process(target=findArmstrong, args=args)  # args is unpacked into target's parameters
        workers.append(worker)
        worker.start()
    for worker in workers:
        worker.join()
if __name__ == '__main__':
    start = time.time()
    # Swap the comment below to time threads instead of processes
    findByProcess((20000000, 25000000), (25000000, 30000000))
    # findByThread((20000000, 25000000), (25000000, 30000000))
    print(time.time() - start)
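When the work splits into more chunks than you want to start and join by hand, a process pool takes over that bookkeeping. Here is a minimal sketch using the standard library's multiprocessing.Pool with the findArmstrong function above; the pool size and the four-way split of the range are illustrative assumptions, not part of the original measurement.

from multiprocessing import Pool

if __name__ == '__main__':
    # Split [20000000, 30000000) into four equal chunks and let a
    # pool of four worker processes search them in parallel.
    ranges = [(20000000 + i * 2500000, 20000000 + (i + 1) * 2500000)
              for i in range(4)]
    with Pool(processes=4) as pool:
        pool.starmap(findArmstrong, ranges)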