
Benchmarking bert_base inference performance under different parallelism strategies

  • I. Test Data
  • II. Test Steps
    • 1. Generate the BERT config file
    • 2. Install dependencies
    • 3. DeepSpeed 4-GPU tensor parallelism
    • 4. FSDP 4-GPU parallelism
    • 5. Manual even weight split across 4 GPUs, single-process multi-GPU inference
    • 6. Manual 4-way split, NCCL-based pipeline parallelism

This post benchmarks the inference performance of the bert_base model under different parallelism strategies.

Constraints

  • 1. The GPUs in the test server do not support P2P, and the interconnect is only PCIe Gen1 x16.

Takeaways

  • How to use DeepSpeed for inference
  • How to use FSDP for inference
  • How to split model weights across multiple GPUs
  • Custom pipeline parallelism (splitting the network and inserting custom modifications)
  • How to automatically handle PyTorch operators whose input tensors are not all on the same device (a minimal sketch follows this list)
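The last point is worth spelling out before the tests. The approach, used in full in section 5 below, is to intercept every operator with a TorchDispatchMode and copy stray inputs onto one device before dispatch. Here is a minimal sketch of the core idea; the class name AutoDeviceMode is ours, simplified from the TorchDumpDispatchMode in section 5, and it only handles tensors in positional arguments:

import torch
from torch.utils._python_dispatch import TorchDispatchMode

class AutoDeviceMode(TorchDispatchMode):
    # Before each op runs, move every CUDA tensor argument onto the
    # highest-indexed GPU among the arguments, using the same in-place
    # `.data = .data.to(device)` trick as section 5.
    def __torch_dispatch__(self, func, types, args=(), kwargs=None):
        kwargs = kwargs or {}
        gpu_tensors = [a for a in args if isinstance(a, torch.Tensor) and a.is_cuda]
        if len(gpu_tensors) > 1:
            target = max((t.device for t in gpu_tensors), key=lambda d: d.index)
            for t in gpu_tensors:
                if t.device != target:
                    t.data = t.data.to(target, non_blocking=True)
        return func(*args, **kwargs)

# Usage: run a model whose weights are spread over several GPUs under the mode:
#   with AutoDeviceMode():
#       outputs = model(input_tokens)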

I. Test Data

| Parallelism strategy | QPS | Peak GPU utilization |
| --- | --- | --- |
| DeepSpeed 4-GPU tensor parallelism | 175.73 | rank:0 100.00, rank:1 100.00, rank:2 97.00, rank:3 97.00 |
| FSDP 4-GPU parallelism | 137.80 | rank:0 40.00, rank:1 40.00, rank:2 39.00, rank:3 40.00 |
| Manual even weight split across 4 GPUs, single-process multi-GPU | 29.34 | n/a |
| Manual 4-way split, NCCL-based pipeline parallelism | 244.76 | rank:0 97.00, rank:1 40.00, rank:2 39.00, rank:3 78.00 |

II. Test Steps

1. Generate the BERT config file

tee ./config.json <<-'EOF'
{
  "architectures": ["BertForMaskedLM"],
  "attention_probs_dropout_prob": 0.1,
  "directionality": "bidi",
  "hidden_act": "gelu",
  "hidden_dropout_prob": 0.1,
  "hidden_size": 768,
  "initializer_range": 0.02,
  "intermediate_size": 3072,
  "layer_norm_eps": 1e-12,
  "max_position_embeddings": 512,
  "model_type": "bert",
  "num_attention_heads": 12,
  "num_hidden_layers": 12,
  "pad_token_id": 0,
  "pooler_fc_size": 768,
  "pooler_num_attention_heads": 12,
  "pooler_num_fc_layers": 3,
  "pooler_size_per_head": 128,
  "pooler_type": "first_token_transform",
  "type_vocab_size": 2,
  "vocab_size": 21128
}
EOF
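Optionally, a quick sanity check (ours, not part of the original workflow) that the config parses and yields a randomly initialized bert_base-sized model:

from transformers import AutoModelForMaskedLM, BertConfig

config = BertConfig.from_pretrained("./config.json")
model = AutoModelForMaskedLM.from_config(config)  # random weights, no checkpoint needed
print(sum(p.numel() for p in model.parameters()))  # roughly 1e8 parameters for this config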

2. Install dependencies

pip install nvidia-ml-py3
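All of the scripts below sample GPU utilization on a background thread via this package; the core NVML calls are just the following (a minimal sketch):

import pynvml

pynvml.nvmlInit()
handle = pynvml.nvmlDeviceGetHandleByIndex(0)        # GPU 0
util = pynvml.nvmlDeviceGetUtilizationRates(handle)  # percent over NVML's recent sampling window
print(f"gpu:{util.gpu}% mem:{util.memory}%")
pynvml.nvmlShutdown()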

3. DeepSpeed 4-GPU tensor parallelism

tee ds_bert_infer.py <<-'EOF'
import torch
import deepspeed
import os
from deepspeed.accelerator import get_accelerator
import time
import torch.distributed as dist
import pynvml
import numpy as np
import threading

# Samples GPU utilization on a background thread
class PynvmlGPUUtilizationThread(threading.Thread):
    def __init__(self, device, interval=1):
        super().__init__()
        self.interval = interval
        self.running = True
        self.device = device
        self.handle = pynvml.nvmlDeviceGetHandleByIndex(device)
        self.utilizations = []

    def run(self):
        while self.running:
            self.get_and_print_gpu_utilization()
            time.sleep(self.interval)

    def stop(self):
        self.running = False

    def get_and_print_gpu_utilization(self):
        utilization = pynvml.nvmlDeviceGetUtilizationRates(self.handle)
        self.utilizations.append(utilization.gpu)

    def data(self):
        return np.max(self.utilizations)

def inference():
    deepspeed.init_distributed(dist_backend='nccl')
    world_size = torch.distributed.get_world_size()
    local_rank = int(os.environ['LOCAL_RANK'])
    rank = torch.distributed.get_rank()
    pynvml.nvmlInit()
    torch.manual_seed(1)

    from transformers import AutoModelForMaskedLM, BertConfig
    config = BertConfig.from_pretrained("./config.json")
    model = AutoModelForMaskedLM.from_config(config)
    model.eval()

    engine = deepspeed.init_inference(model,
                                      tensor_parallel={"tp_size": world_size},
                                      dtype=torch.float32,
                                      replace_with_kernel_inject=True)
    device = get_accelerator().current_device_name()
    input_tokens = torch.randint(0, config.vocab_size, (1, 128)).to(device)
    epoch = 1024

    # Baseline: everything on the default stream
    gpu_thread = PynvmlGPUUtilizationThread(local_rank, interval=1)
    gpu_thread.start()
    t0 = time.time()
    for i in range(epoch):
        outputs = engine(input_tokens)
    dist.barrier()
    torch.cuda.synchronize()
    t1 = time.time()
    gpu_thread.stop()
    gpu_thread.join()
    time.sleep(0.2 * rank)  # stagger the per-rank prints
    if rank == 0:
        qps = epoch / (t1 - t0)
        print(f"default stream qps:{qps:.2f}")
    print(f"rank:{rank} util:{gpu_thread.data():.2f}")

    # Issue requests round-robin over 1/2/4/8 CUDA streams
    stream_nbs = [1, 2, 4, 8]
    for n in stream_nbs:
        dist.barrier()
        if rank == 0:
            print("-----------------------------------------------")
        streams = [torch.cuda.Stream() for _ in range(n)]
        total_samples = 0
        gpu_thread = PynvmlGPUUtilizationThread(local_rank, interval=1)
        gpu_thread.start()
        t0 = time.time()
        for _ in range(epoch // n):
            for i in range(n):
                with torch.cuda.stream(streams[i]):
                    total_samples += 1
                    outputs = engine(input_tokens)
        dist.barrier()
        torch.cuda.synchronize()
        t1 = time.time()
        gpu_thread.stop()
        gpu_thread.join()
        time.sleep(0.2 * rank)
        if rank == 0:
            qps = total_samples / (t1 - t0)
            print(f"{n} streams qps:{qps:.2f}")
        print(f"rank:{rank} util:{gpu_thread.data():.2f}")

if __name__ == "__main__":
    inference()
EOF
deepspeed --num_gpus=4 ds_bert_infer.py

Output

------------------------------------------------------
default stream qps: 147.10
rank:0 util:90.00
rank:1 util:86.00
rank:2 util:89.00
rank:3 util:89.00
-----------------------------------------------
1 streams qps:162.62
rank:0 util:100.00
rank:1 util:100.00
rank:2 util:92.00
rank:3 util:88.00
-----------------------------------------------
2 streams qps:177.31
rank:0 util:100.00
rank:1 util:100.00
rank:2 util:99.00
rank:3 util:98.00
-----------------------------------------------
4 streams qps:176.11
rank:0 util:100.00
rank:1 util:100.00
rank:2 util:98.00
rank:3 util:97.00
-----------------------------------------------
8 streams qps:175.73
rank:0 util:100.00
rank:1 util:100.00
rank:2 util:97.00
rank:3 util:97.00

4. FSDP 4-GPU parallelism

tee fsdp_bert_infer.py <<-'EOF'
import time
import os
import torch
import torch.distributed as dist
from torch.distributed.fsdp import FullyShardedDataParallel as FSDP
import pynvml
import numpy as np
import threading

# Samples GPU utilization on a background thread
class PynvmlGPUUtilizationThread(threading.Thread):
    def __init__(self, device, interval=1):
        super().__init__()
        self.interval = interval
        self.running = True
        self.device = device
        self.handle = pynvml.nvmlDeviceGetHandleByIndex(device)
        self.utilizations = []

    def run(self):
        while self.running:
            self.get_and_print_gpu_utilization()
            time.sleep(self.interval)

    def stop(self):
        self.running = False

    def get_and_print_gpu_utilization(self):
        utilization = pynvml.nvmlDeviceGetUtilizationRates(self.handle)
        self.utilizations.append(utilization.gpu)

    def data(self):
        return np.max(self.utilizations)

def cleanup():
    dist.destroy_process_group()

def demo_fsdp(rank, world_size, multi_stream):
    pynvml.nvmlInit()
    device = torch.device(f"cuda:{rank}")
    torch.manual_seed(1)
    from transformers import AutoModelForMaskedLM, BertConfig
    config = BertConfig.from_pretrained("./config.json")
    model = AutoModelForMaskedLM.from_config(config)
    model.eval()
    fsdp_model = FSDP(model, forward_prefetch=True).to(device)
    input_tokens = torch.randint(0, config.vocab_size, (1, 128)).to(device)
    epoch_sz = 1024
    gpu_thread = PynvmlGPUUtilizationThread(rank, interval=1)
    gpu_thread.start()
    sz = 8
    total_sample = 0
    streams = [torch.cuda.Stream() for _ in range(sz)]
    t0 = time.time()
    for epoch in range(epoch_sz):
        with torch.no_grad():
            outputs = []
            if multi_stream:
                for i in range(sz):
                    with torch.cuda.stream(streams[i]):
                        total_sample += 1
                        outputs.append(fsdp_model(input_tokens))
            else:
                output = fsdp_model(input_tokens)
                total_sample += 1
    torch.cuda.synchronize(rank)
    t1 = time.time()
    gpu_thread.stop()
    gpu_thread.join()
    time.sleep(0.2 * rank)  # stagger the per-rank prints
    if rank == 0:
        qps = total_sample / (t1 - t0)
        print(f"qps:{qps:.2f}")
    print(f"rank:{rank} util:{gpu_thread.data():.2f}")
    cleanup()

if __name__ == "__main__":
    dist.init_process_group(backend='nccl')
    world_size = torch.distributed.get_world_size()
    rank = torch.distributed.get_rank()
    local_rank = int(os.environ['LOCAL_RANK'])
    torch.cuda.set_device(local_rank)
    demo_fsdp(local_rank, world_size, True)
EOF
torchrun --nnodes=1 --nproc_per_node=4 fsdp_bert_infer.py

Output. Utilization stays around 40%: each FSDP forward pass must all-gather every layer's parameter shards across ranks, and over a PCIe Gen1 link without P2P that communication likely dominates.

qps:137.80
rank:0 util:40.00
rank:1 util:40.00
rank:2 util:39.00
rank:3 util:40.00

5. Manual even weight split across 4 GPUs, single-process multi-GPU inference

tee split_bert_infer.py <<-'EOF'
import torch
import os
import time
from torch.utils._python_dispatch import TorchDispatchMode
from dataclasses import dataclass
from typing import Any

@dataclass
class _ProfilerState:
    cls: Any
    object: Any = None

class TorchDumpDispatchMode(TorchDispatchMode):
    def __init__(self, parent):
        super().__init__()
        self.parent = parent
        self.op_index = 0
        self.cvt_count = 0

    def get_max_gpu_id(self, tensors):
        # Find the highest GPU index among the tensor arguments
        max_gpu_id = -1
        max_index = -1
        tensor_index = []
        for i, tensor in enumerate(tensors):
            if not isinstance(tensor, torch.Tensor):
                continue
            tensor_index.append(i)
            if tensor.is_cuda:
                gpu_id = tensor.get_device()
                if gpu_id > max_gpu_id:
                    max_gpu_id = gpu_id
                    max_index = i
        if max_gpu_id == -1:
            return None, None, tensor_index
        return max_index, max_gpu_id, tensor_index

    def convert(self, op_type, tensor_list):
        # Move every other tensor argument onto the highest-indexed GPU
        index, gpu_id, tensor_index = self.get_max_gpu_id(tensor_list)
        if index is None:
            return
        keep_index = set(tensor_index) - set([index])
        device = torch.device(f"cuda:{gpu_id}")
        for i in keep_index:
            if tensor_list[i].device != device:
                # print(f"{op_type} {i} {tensor_list[i].device} -> {device}")
                # Inter-GPU copies are serialized, so multiple streams cannot
                # fully improve performance here
                tensor_list[i].data = tensor_list[i].data.to(device, non_blocking=True)

    def __torch_dispatch__(self, func, types, args=(), kwargs=None):
        if kwargs is None:
            kwargs = {}
        op_type = f"{func}"
        self.op_index += 1
        if isinstance(args, list) or isinstance(args, tuple):
            self.convert(op_type, args)
        elif isinstance(args[0], list) or isinstance(args[0], tuple):
            self.convert(op_type, args[0])
        else:
            print(op_type)
        output = func(*args, **kwargs)
        return output

class TorchDumper:
    def __init__(self, **kwargs):
        self.p = _ProfilerState(TorchDumpDispatchMode)
        self.kwargs = kwargs

    def __enter__(self):
        if self.p.object is None:
            o = self.p.cls(self, **self.kwargs)
            o.__enter__()
            self.p.object = o
        else:
            self.p.object.step()
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        TorchDumper._CURRENT_Dumper = None
        if self.p.object is not None:
            self.p.object.__exit__(exc_type, exc_val, exc_tb)
            del self.p.object

torch.manual_seed(1)
from transformers import AutoModelForMaskedLM, BertConfig
config = BertConfig.from_pretrained("./config.json")
model = AutoModelForMaskedLM.from_config(config)
model.eval()

# Accumulate the byte size of each submodule's parameters and buffers
from collections import OrderedDict
cur_dev = 0
param_size = OrderedDict()
total_size = 0
for name, param in model.named_parameters():
    sz = param.numel() * param.element_size()
    key = ".".join(name.split(".")[:-1])
    if key not in param_size:
        param_size[key] = 0
    param_size[key] += sz
    total_size += sz
for name, param in model.named_buffers():
    sz = param.numel() * param.element_size()
    key = ".".join(name.split(".")[:-1])
    if key not in param_size:
        param_size[key] = 0
    param_size[key] += sz
    total_size += sz

# Greedily assign submodules to the 4 GPUs, about total_size/4 bytes each
sz_per_dev = total_size / 4
cur_size = 0
dev_map = OrderedDict()
for k, v in param_size.items():
    sz = v
    cur_size += sz
    if cur_size >= sz_per_dev:
        print(cur_dev, cur_size)
        cur_size = 0
        cur_dev += 1
    dev_map[k] = cur_dev

for name, param in model.named_parameters():
    key = ".".join(name.split(".")[:-1])
    op = dict(model.named_parameters())[name]
    device = f"cuda:{dev_map[key]}"
    op.data = op.data.to(device)
for name, param in model.named_buffers():
    key = ".".join(name.split(".")[:-1])
    op = dict(model.named_buffers())[name]
    device = f"cuda:{dev_map[key]}"
    op.data = op.data.to(device)

with TorchDumper():
    sz = 4
    input_tokens = torch.randint(0, config.vocab_size, (1, 128)).to("cuda:0")
    streams = [torch.cuda.Stream() for _ in range(sz)]
    batch_size = 0
    t0 = time.time()
    for epoch in range(1024):
        outputs = []
        for i in range(sz):
            with torch.cuda.stream(streams[i]):
                batch_size += 1
                outputs.append(model(input_tokens))
    torch.cuda.synchronize()
    t1 = time.time()
    print("qps:", batch_size / (t1 - t0))
EOF
python split_bert_infer.py

Output

qps: 29.34
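29.34 QPS is far below the other strategies. As the comment in `convert` notes, every device boundary in the layer-split model forces a serialized device-to-device copy over the slow PCIe link, and multiple streams cannot hide that. A minimal micro-benchmark of that copy path (ours; assumes at least GPUs 0 and 1 are visible):

import time
import torch

x = torch.randn(1, 128, 768, device="cuda:0")  # one BERT hidden-state activation
torch.cuda.synchronize()
t0 = time.time()
for _ in range(1000):
    y = x.to("cuda:1", non_blocking=True)
torch.cuda.synchronize()
t1 = time.time()
print(f"avg cross-GPU copy: {(t1 - t0) / 1000 * 1e6:.1f} us")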

6. Manual 4-way split, NCCL-based pipeline parallelism

tee pp_bert_infer.py <<-'EOF'
import torch
import os
import time
from collections import OrderedDict
import torch.distributed as dist
import numpy as np
import pynvml
import threading

class EmptyModule(torch.nn.Module):
    # BertLayer returns a tuple; this unwraps it for nn.Sequential
    def __init__(self):
        super(EmptyModule, self).__init__()

    def forward(self, x):
        return x[0]

# Samples GPU utilization on a background thread
class PynvmlGPUUtilizationThread(threading.Thread):
    def __init__(self, device, interval=1):
        super().__init__()
        self.interval = interval
        self.running = True
        self.device = device
        self.handle = pynvml.nvmlDeviceGetHandleByIndex(device)
        self.utilizations = []

    def run(self):
        while self.running:
            self.get_and_print_gpu_utilization()
            time.sleep(self.interval)

    def stop(self):
        self.running = False

    def get_and_print_gpu_utilization(self):
        utilization = pynvml.nvmlDeviceGetUtilizationRates(self.handle)
        self.utilizations.append(utilization.gpu)

    def data(self):
        return np.max(self.utilizations)

pynvml.nvmlInit()
dist.init_process_group(backend='nccl')
world_size = torch.distributed.get_world_size()
rank = torch.distributed.get_rank()
local_rank = int(os.environ['LOCAL_RANK'])
torch.cuda.set_device(local_rank)
torch.manual_seed(1)

from transformers import AutoModelForMaskedLM, BertConfig
config = BertConfig.from_pretrained("./config.json")
model = AutoModelForMaskedLM.from_config(config)
model.eval()

# See modeling_bert.py for the relevant submodule names
divided = []
submodules = []
submodules.append(("embeddings", model.bert.embeddings))
for i, m in enumerate(model.bert.encoder.layer[:3]):
    submodules.append((f"{i}", m))
    submodules.append((f"{i}-1", EmptyModule()))
divided.append(submodules)

submodules = []
for i, m in enumerate(model.bert.encoder.layer[3:7]):
    submodules.append((f"{i}", m))
    submodules.append((f"{i}-1", EmptyModule()))
divided.append(submodules)

submodules = []
for i, m in enumerate(model.bert.encoder.layer[7:11]):
    submodules.append((f"{i}", m))
    submodules.append((f"{i}-1", EmptyModule()))
divided.append(submodules)

submodules = []
for i, m in enumerate(model.bert.encoder.layer[11:]):
    submodules.append((f"{i}", m))
    submodules.append((f"{i}-1", EmptyModule()))
submodules.append(("cls", model.cls))
divided.append(submodules)

# Each rank owns one pipeline stage
device = f"cuda:{local_rank}"
example_input = torch.randint(0, config.vocab_size, (1, 128)).to(device)
submodule = torch.nn.Sequential(OrderedDict(divided[local_rank])).to(device)

sreq = None
ts = []
gpu_thread = PynvmlGPUUtilizationThread(local_rank, interval=1)
gpu_thread.start()
t0 = time.time()
for epoch in range(1000):
    if sreq is not None and not sreq.is_completed():
        sreq.wait()
        sreq = None
    if local_rank != 0:
        # Receive the activation shape, then the activation itself
        tensor_size = torch.empty((3,), dtype=torch.int64).to(device)
        torch.distributed.recv(tensor_size, local_rank - 1)
        example_input = torch.empty(tensor_size.tolist()).to(device)
        torch.distributed.recv(example_input, local_rank - 1)
    else:
        torch.manual_seed(1)
    output = submodule(example_input)
    if local_rank < world_size - 1:
        # Send the shape first, then the activation, without blocking
        tensor_size = torch.tensor(output.size(), dtype=torch.int64).to(device)
        torch.distributed.isend(tensor_size, local_rank + 1)
        sreq = torch.distributed.isend(output, local_rank + 1)
    elif local_rank == world_size - 1:
        ts.append(time.time())
dist.barrier()
t1 = time.time()
gpu_thread.stop()
gpu_thread.join()
time.sleep(0.2 * local_rank)  # stagger the per-rank prints
print(f"rank:{local_rank} util:{gpu_thread.data():.2f}")
if local_rank == world_size - 1:
    ts = ts[len(ts) // 2:]  # keep the steady-state second half
    print("latency:", ts[1] - ts[0], "qps:", len(ts) / (ts[-1] - ts[0]), 1000 / (t1 - t0))
EOF
torchrun --nnodes=1 --nproc_per_node=4 pp_bert_infer.py

Output:

rank:1 util:40.00
rank:0 util:97.00
rank:2 util:39.00
rank:3 util:78.00
latency: 0.002396106719970703 qps: 408.6954420411698 244.76515394402227
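A quick consistency check (ours) on these numbers: in steady state a 4-stage pipeline emits one result per stage interval, so the printed latency, which is the gap between consecutive outputs at the last stage, should be roughly the reciprocal of the steady-state qps. The lower 244.76 figure averages over all 1000 iterations, including pipeline fill-up.

gap = 0.002396106719970703  # printed latency: gap between consecutive outputs
print(1.0 / gap)            # ~417, close to the printed steady-state qps of 408.7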