一、Thread Overview
Drawbacks of processes:
1. A process is very expensive to create, which is why '主线程...' gets printed before the child process's output; as a consequence we cannot open an unlimited number of child processes.
2. If too many child processes are opened, the CPU wastes a great deal of time switching between them.

This is why threads were introduced: threads exist precisely to solve the two problems above. A lightweight process ==> a thread.

Thread vs process:
- A thread is a lightweight process.
- A process contains at least one thread and may contain several.
- Threads are the units that actually do the work.

Advantages of threads:
- Threads start faster than processes.
- Threads within one process share data.
- The CPU switches between threads far faster than between processes.

When to use which (a rough timing sketch follows below):
- Threads: when the work involves a lot of IO.
- Processes: when the work is computation-heavy.

Data belonging to one process is shared among all of the threads inside that process.
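To make the "threads for IO, processes for CPU-bound work" guideline concrete, here is a minimal timing sketch. It is only an illustration under assumed names (io_task, N_WORKERS and the worker count of 20 are not from the original notes): it runs the same IO-bound task with 20 threads and then with 20 processes, and on most machines the thread version finishes sooner simply because starting a thread is far cheaper than starting a process.

import time
from threading import Thread
from multiprocessing import Process

N_WORKERS = 20   # illustrative worker count

def io_task():
    time.sleep(1)   # stands in for a blocking IO call (network, disk, ...)

def run_with(worker_cls):
    start = time.time()
    workers = [worker_cls(target=io_task) for _ in range(N_WORKERS)]
    for w in workers:
        w.start()
    for w in workers:
        w.join()
    return time.time() - start

if __name__ == '__main__':
    print('threads  :', run_with(Thread))    # roughly 1s plus a tiny startup cost
    print('processes:', run_with(Process))   # roughly 1s plus a much larger startup cost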
二、Two Ways to Start a Thread

# Method 1: start a thread by passing a function
from threading import Thread
import time

def task(arg):
    print('%s is running...' % arg)
    time.sleep(1)
    print('%s is done...' % arg)

if __name__ == '__main__':
    t = Thread(target=task, args=('子线程',))
    t.start()   # the child thread begins executing the instant start() is called

    print('主线程..')

# Method 2: start a thread by subclassing Thread
from threading import Thread
import time

class MyThread(Thread):
    def run(self):
        print('%s is running...' % self)
        time.sleep(1)
        print('%s is done...' % self)

if __name__ == '__main__':
    t = MyThread()
    t.start()
    print('主线程...')
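A natural follow-up for the class-based form is how to pass arguments to the thread. One hedged way (the __init__ override and the arg attribute below are illustrative, not part of the original example) is to accept them in __init__, store them on the instance, and read them inside run():

from threading import Thread
import time

class MyThread(Thread):
    def __init__(self, arg):
        super().__init__()   # Thread.__init__ must run before the instance is used
        self.arg = arg       # illustrative: stash the argument on the instance

    def run(self):
        print('%s is running...' % self.arg)
        time.sleep(1)
        print('%s is done...' % self.arg)

if __name__ == '__main__':
    t = MyThread('子线程')
    t.start()
    print('主线程...')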
三、Threads in the Same Process Share Data

from threading import Thread

x = 100

def task():
    global x
    x = 0

if __name__ == '__main__':
    t = Thread(target=task)
    t.start()
    t.join()    # make sure the child thread has modified x before printing
    print(x)    # prints 0: the data is shared with the child thread
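For contrast, a hedged sketch of the same experiment with a child process, only swapping Thread for Process, prints 100 instead of 0, because the child process works on its own copy of x:

from multiprocessing import Process

x = 100

def task():
    global x
    x = 0   # modifies the copy that lives inside the child process only

if __name__ == '__main__':
    p = Process(target=task)
    p.start()
    p.join()
    print(x)   # prints 100: process memory is not shared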
四、Comparing the Startup Speed of Processes and Threads

from threading import Thread
from multiprocessing import Process
import time

def task(arg):
    print('%s is running...' % arg)
    time.sleep(1)
    print('%s is done...' % arg)

if __name__ == '__main__':
    p = Process(target=task, args=('子进程',))
    p.start()

    t = Thread(target=task, args=('子线程',))
    t.start()   # the child thread starts so quickly that its output appears before '主线程..'

    print('主线程..')
五、Other Thread Attributes and Methods

# Example 1: join() and is_alive()
from threading import Thread
import time

def task():
    print('xxxx')

if __name__ == '__main__':
    t = Thread(target=task)
    t.start()
    t.join()              # wait until the child thread finishes
    print(t.is_alive())   # check whether the child thread is still alive; False here
    print('主线程...')

# Example 2: total running time of 10 child threads
import time
from threading import Thread

def task():
    print('xxx')
    time.sleep(1)

if __name__ == '__main__':
    start = time.time()
    t_l = []
    for i in range(10):
        t = Thread(target=task)
        t_l.append(t)
        t.start()
    for t in t_l:
        t.join()
    print('子线程总运行时间为%s' % (time.time() - start))
    print(t_l)
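Besides join() and is_alive(), the threading module offers a few other frequently used helpers: current_thread(), active_count() and enumerate(). The sketch below (the task body and thread names are illustrative) simply prints what they return:

from threading import Thread, current_thread, active_count, enumerate as thread_enumerate
import time

def task():
    print('running in:', current_thread().name)   # name of the thread executing task
    time.sleep(1)

if __name__ == '__main__':
    for i in range(3):
        Thread(target=task, name='worker-%s' % i).start()
    print('alive threads:', active_count())        # main thread + the three sleeping workers
    print('thread objects:', thread_enumerate())   # all currently alive Thread objects
    print('main thread is:', current_thread().name)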
六、Daemon Threads

# Daemon thread, example 1
# Conclusion: a daemon thread is killed as soon as all non-daemon threads have finished.
from threading import Thread
from threading import current_thread
import time

def task():
    print('%s is running...' % current_thread().name)
    time.sleep(3)
    print('%s is done...' % current_thread().name)

if __name__ == '__main__':
    t = Thread(target=task, name='守护线程')
    t.daemon = True
    t.start()
    print('主线程...')
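Since Python 3.3 the daemon flag can also be passed directly to the Thread constructor, which is slightly tidier than setting the attribute afterwards; a minimal variant of the example above:

from threading import Thread
import time

def task():
    time.sleep(3)
    print('daemon done')   # normally never printed: the main thread exits first

if __name__ == '__main__':
    t = Thread(target=task, daemon=True)   # daemon=True at construction time
    t.start()
    print('主线程...')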

# Daemon thread, example 2
# Conclusion: the daemon thread is only killed once ALL non-daemon threads have finished.
from threading import Thread
import time

def foo():
    print(123)
    time.sleep(3)
    print('end123')

def bar():
    print(456)
    time.sleep(1)
    print('end456')

t1 = Thread(target=foo)
t2 = Thread(target=bar)
t2.daemon = True
t1.start()
t2.start()
print('主线程...')

# Daemon process, example 1
# Conclusion: a child process marked as a daemon is terminated as soon as its parent process finishes.
from multiprocessing import Process
import time

def foo():
    print(123)
    time.sleep(3)
    print('end123')

def bar():
    print(456)
    time.sleep(1)
    print('end456')

if __name__ == '__main__':
    p1 = Process(target=foo)
    p2 = Process(target=bar)
    p1.daemon = True
    # p2.daemon = True
    p1.start()
    p2.start()
    print('主线程...')
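One more detail worth knowing about daemon threads: if the main thread join()s a daemon thread, it waits for it, so the daemon still gets to run to completion. A small illustrative sketch:

from threading import Thread
import time

def task():
    print('daemon starts')
    time.sleep(2)
    print('daemon finished')   # printed, because of the join below

if __name__ == '__main__':
    t = Thread(target=task)
    t.daemon = True
    t.start()
    t.join()          # the main thread waits, so the daemon is not killed early
    print('主线程...')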
七、Thread Mutex Locks

# Example 1: without a lock, the unsynchronized read-modify-write corrupts the shared data.
# The sleep forces all 100 threads to read x while it is still 100, so each one writes back
# 100 - 1 and the final printed value is 99 instead of the expected 0.
from threading import Thread
import time

x = 100

def task():
    global x
    tmp = x
    time.sleep(0.1)   # simulate network latency
    x = tmp - 1

if __name__ == '__main__':
    t_l = []
    for i in range(100):
        t = Thread(target=task)
        t_l.append(t)
        t.start()
    for j in t_l:
        j.join()
    print(x)

# Example 2: the same code fixed with a thread mutex lock.
# A thread Lock works essentially like the process Lock (for processes: from multiprocessing import Lock).
from threading import Thread
from threading import Lock
import time

mutex = Lock()
x = 100

def task():
    global x
    mutex.acquire()   # take the lock
    tmp = x
    # time.sleep(0.1)
    x = tmp - 1
    mutex.release()   # release the lock

if __name__ == '__main__':
    t_l = []
    for i in range(100):
        t = Thread(target=task)
        t_l.append(t)
        t.start()
    for j in t_l:
        j.join()
    print(x)
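A Lock is also a context manager, so the acquire()/release() pair can be written as a with block, which guarantees the lock is released even if the code inside raises. A minimal variant of the example above:

from threading import Thread, Lock

mutex = Lock()
x = 100

def task():
    global x
    with mutex:      # acquire() on entry, release() on exit, even if an exception is raised
        tmp = x
        x = tmp - 1

if __name__ == '__main__':
    threads = [Thread(target=task) for _ in range(100)]
    for t in threads:
        t.start()
    for t in threads:
        t.join()
    print(x)   # 0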
八、Deadlock and Recursive Locks

# Example 1: the code below deadlocks.
# The first thread finishes f1, enters f2, grabs the B lock and sleeps; meanwhile another thread
# starts f1 and grabs the A lock. Each thread now waits forever for the lock the other one holds.
from threading import Thread
from threading import Lock
import time

mutexA = Lock()
mutexB = Lock()

class MyThread(Thread):
    def run(self):
        self.f1()
        self.f2()

    def f1(self):
        mutexA.acquire()
        print('%s 拿到了A锁' % self.name)
        mutexB.acquire()
        print('%s 拿到了B锁' % self.name)
        mutexA.release()
        mutexB.release()

    def f2(self):
        mutexB.acquire()
        print('%s 拿到了B锁' % self.name)
        time.sleep(1)
        mutexA.acquire()
        print('%s 拿到了A锁' % self.name)
        mutexB.release()
        mutexA.release()

if __name__ == '__main__':
    for i in range(10):
        t = MyThread()
        t.start()

# Example 2: using the recursive lock RLock to fix the deadlock above.
from threading import Thread
from threading import Lock
from threading import RLock
import time

obj = RLock()   # an RLock can be acquire()d repeatedly by the thread that already holds it
# mutexA = Lock()
# mutexB = Lock()
mutexA = obj
mutexB = obj

class MyThread(Thread):
    def run(self):
        self.f1()
        self.f2()

    def f1(self):
        mutexA.acquire()
        print('%s 拿到了A锁' % self.name)
        mutexB.acquire()
        print('%s 拿到了B锁' % self.name)
        mutexA.release()
        mutexB.release()

    def f2(self):
        mutexB.acquire()
        print('%s 拿到了B锁' % self.name)
        time.sleep(1)
        mutexA.acquire()
        print('%s 拿到了A锁' % self.name)
        mutexB.release()
        mutexA.release()

if __name__ == '__main__':
    for i in range(10):
        t = MyThread()
        t.start()
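The key property of an RLock is that the thread already holding it can acquire it again without blocking: internally it records an owner and a counter, and other threads can only grab the lock once the counter drops back to zero. A tiny single-thread sketch of that behaviour:

from threading import RLock

rlock = RLock()

rlock.acquire()   # counter = 1, the current thread becomes the owner
rlock.acquire()   # counter = 2, a plain Lock would deadlock right here
print('acquired twice without blocking')
rlock.release()   # counter = 1, still held by this thread
rlock.release()   # counter = 0, other threads may now acquire it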
九、Thread Semaphores

from threading import Thread
from threading import Semaphore
from threading import current_thread
import time
import random

sema = Semaphore(5)   # at most 5 threads may hold the semaphore at once (5 "toilet stalls")

def task():
    with sema:
        print('%s 正在上厕所' % current_thread().name)
        time.sleep(random.randint(1, 3))

if __name__ == '__main__':
    for i in range(20):
        t = Thread(target=task)
        t.start()
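The with sema block above is shorthand for an explicit acquire()/release() pair, and a Semaphore(1) behaves like an ordinary mutex. A small illustrative sketch with an assumed limit of 2 concurrent workers:

from threading import Thread, Semaphore, current_thread
import time

sema = Semaphore(2)   # illustrative: only 2 workers run the guarded section at a time

def task():
    sema.acquire()                       # same effect as entering "with sema:"
    try:
        print('%s working' % current_thread().name)
        time.sleep(1)
    finally:
        sema.release()                   # always release, even on error

if __name__ == '__main__':
    for i in range(6):
        Thread(target=task).start()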
十、Thread Pools (the unified concurrent.futures package introduced in Python 3)
Using a thread pool

import time
import os
from concurrent.futures import ThreadPoolExecutor    # thread pool
from concurrent.futures import ProcessPoolExecutor   # process pool

# The process pool and the thread pool are used in exactly the same way.
# The example below uses the thread pool.

def task(i):
    print(i)
    time.sleep(1)

if __name__ == '__main__':
    '''
    Suggested thread pool size = number of CPUs * 5
    '''
    # print(os.cpu_count())   # 4 on this machine
    t = ThreadPoolExecutor(20)   # put 20 threads into the pool
    res_li = []
    for i in range(30):
        res = t.submit(task, i)  # like start(): hand the task to the pool for execution
        res_li.append(res)
    t.shutdown()   # combines the pool's close + join: stop accepting tasks and wait for the workers to finish
    for res in res_li:
        res.result()   # similar to get() on apply_async
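On top of submit(), concurrent.futures also provides executor.map() for applying one function across an iterable and Future.add_done_callback() for handling results as they complete. A short hedged sketch (the square function and pool size are illustrative):

from concurrent.futures import ThreadPoolExecutor

def square(i):
    return i * i   # illustrative task

def on_done(future):
    # called once the future has a result
    print('callback got:', future.result())

if __name__ == '__main__':
    with ThreadPoolExecutor(max_workers=5) as pool:   # the with block calls shutdown() on exit
        for i in range(5):
            pool.submit(square, i).add_done_callback(on_done)
        print(list(pool.map(square, range(5))))       # [0, 1, 4, 9, 16], results in input order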