0 Background

When analyzing model performance, we need to measure inference time accurately. However, if we simply take timestamps before and after the inference call and subtract them, what we actually measure is the time the Host takes to dispatch commands to the Device. As the figure below shows, Host-side command dispatch and Device-side computation run asynchronously.
[Figure: Host-side command dispatch and Device-side computation proceed asynchronously]
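To see this asynchrony directly, here is a minimal standalone sketch (not part of the article's example) that times a single large matmul; the torch.cuda.synchronize() call used for contrast is covered in the next section:

import time
import torch

a = torch.randn(4096, 4096, device="cuda")
b = torch.randn(4096, 4096, device="cuda")

t0 = time.perf_counter()
c = a @ b                    # the Host only enqueues the kernel and returns
t1 = time.perf_counter()
torch.cuda.synchronize()     # block until the GPU has actually finished
t2 = time.perf_counter()
print(f"dispatch: {t1 - t0:.6f}s, actual compute: {t2 - t0:.6f}s")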


1 Accurate timing methods

Two accurate timing methods are in common use: one manually calls a synchronization function and waits for the Device-side computation to finish; the other records timestamps on the Device side with Events.
[Figure: timing with a synchronization function vs. timing with Events]

The example code below measures model inference time in three ways: naively subtracting timestamps taken before and after inference, using a synchronization function, and using Events (each method is repeated 50 times; the first 5 runs are ignored as warm-up and the remaining 45 are averaged).

import time

import torch
import torch.nn as nn


class CustomModel(nn.Module):
    def __init__(self):
        super().__init__()
        self.part0 = nn.Sequential(
            nn.Conv2d(in_channels=3, out_channels=512, kernel_size=3, stride=2, padding=1),
            nn.GELU(),
            nn.Conv2d(in_channels=512, out_channels=1024, kernel_size=3, stride=2, padding=1),
            nn.GELU()
        )
        self.part1 = nn.Sequential(
            nn.AdaptiveAvgPool2d(output_size=(1, 1)),
            nn.Flatten(),
            nn.Linear(in_features=1024, out_features=2048),
            nn.GELU(),
            nn.Linear(in_features=2048, out_features=512),
            nn.GELU(),
            nn.Linear(in_features=512, out_features=1)
        )

    def forward(self, x):
        x = self.part0(x)
        x = self.part1(x)
        return x


def cal_time1(model, x):
    # naive timing: the timestamps only bracket the Host-side kernel dispatch
    with torch.inference_mode():
        time_list = []
        for _ in range(50):
            ts = time.perf_counter()
            ret = model(x)  # returns as soon as the kernels are enqueued
            td = time.perf_counter()
            time_list.append(td - ts)

        # discard the first 5 runs as warm-up, average the remaining 45
        print(f"avg time: {sum(time_list[5:]) / len(time_list[5:]):.5f}")


def cal_time2(model, x):
    # timing with explicit synchronization: the Host waits for the Device
    device = x.device
    with torch.inference_mode():
        time_list = []
        for _ in range(50):
            torch.cuda.synchronize(device)  # drain any pending work first
            ts = time.perf_counter()
            ret = model(x)
            torch.cuda.synchronize(device)  # block until the GPU is done
            td = time.perf_counter()
            time_list.append(td - ts)

        print(f"syn avg time: {sum(time_list[5:]) / len(time_list[5:]):.5f}")


def cal_time3(model, x):
    # timing with CUDA Events recorded on the Device-side stream
    with torch.inference_mode():
        start_event = torch.cuda.Event(enable_timing=True)
        end_event = torch.cuda.Event(enable_timing=True)
        time_list = []
        for _ in range(50):
            start_event.record()
            ret = model(x)
            end_event.record()
            end_event.synchronize()  # wait until end_event has been reached
            # elapsed_time returns milliseconds; convert to seconds
            time_list.append(start_event.elapsed_time(end_event) / 1000)

        print(f"event avg time: {sum(time_list[5:]) / len(time_list[5:]):.5f}")


def main():
    device = torch.device("cuda:0")
    model = CustomModel().eval().to(device)

    x = torch.randn(size=(32, 3, 224, 224), device=device)
    cal_time1(model, x)
    cal_time2(model, x)
    cal_time3(model, x)


if __name__ == '__main__':
    main()

Terminal output:

avg time: 0.00023
syn avg time: 0.04709
event avg time: 0.04710

As the output shows, the time obtained by naively subtracting timestamps around the inference call is extremely short, because the Host never waits for the Device-side computation to finish. The times measured with the synchronization function and with the Event method are clearly much longer (all values are in seconds).
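As a side note, PyTorch also ships torch.utils.benchmark, whose Timer performs the required CUDA synchronization and a warm-up run internally. A minimal sketch, reusing model and x from the example above:

from torch.utils import benchmark

timer = benchmark.Timer(
    stmt="model(x)",
    globals={"model": model, "x": x},
)
result = timer.timeit(45)  # runs the statement 45 times, syncing CUDA as needed
print(f"benchmark avg time: {result.mean:.5f}")  # mean seconds per run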


2 When to use manual synchronize vs. Event

The example above shows that the times measured with the synchronization function and with Events are essentially identical (within 1 ms of each other). So what is the difference? For simply timing a single model's inference there is indeed none to speak of. But if you want to time the individual models inside a complete AI pipeline (which may contain several models plus various CPU computations) without hurting the pipeline's overall performance, Events are the better choice: a synchronization function can leave the Host stuck in a wait state, unable to do anything else, wasting compute resources. Consider the example below, where the pipeline consists of Model1 inference, a pure-CPU computation, and Model2 inference executed in series, and suppose we want to measure how long model1 and model2 each take:

import time

import torch
import torch.nn as nn
import numpy as np


class CustomModel1(nn.Module):
    def __init__(self):
        super().__init__()
        self.part0 = nn.Sequential(
            nn.Conv2d(in_channels=3, out_channels=512, kernel_size=3, stride=2, padding=1),
            nn.GELU(),
            nn.Conv2d(in_channels=512, out_channels=1024, kernel_size=3, stride=2, padding=1),
            nn.GELU()
        )

    def forward(self, x):
        x = self.part0(x)
        return x


class CustomModel2(nn.Module):
    def __init__(self):
        super().__init__()
        self.part1 = nn.Sequential(
            nn.AdaptiveAvgPool2d(output_size=(1, 1)),
            nn.Flatten(),
            nn.Linear(in_features=1024, out_features=2048),
            nn.GELU(),
            nn.Linear(in_features=2048, out_features=512),
            nn.GELU(),
            nn.Linear(in_features=512, out_features=1)
        )

    def forward(self, x):
        x = self.part1(x)
        return x


def do_pure_cpu_task():
    # simulates a CPU-bound stage between the two model inferences
    x = np.random.randn(1, 3, 512, 512)
    x = x.astype(np.float32)
    x = x * 1024 ** 0.5


def cal_time2(model1, model2, x):
    # synchronize-based timing: the Host blocks at every synchronize call
    device = x.device
    with torch.inference_mode():
        time_total_list = []
        time_model1_list = []
        time_model2_list = []

        for _ in range(50):
            torch.cuda.synchronize(device)
            ts1 = time.perf_counter()
            ret = model1(x)
            torch.cuda.synchronize(device)
            td1 = time.perf_counter()

            do_pure_cpu_task()

            torch.cuda.synchronize(device)
            ts2 = time.perf_counter()
            ret = model2(ret)
            torch.cuda.synchronize(device)
            td2 = time.perf_counter()

            time_model1_list.append(td1 - ts1)
            time_model2_list.append(td2 - ts2)
            time_total_list.append(td2 - ts1)

        avg_model1 = sum(time_model1_list[5:]) / len(time_model1_list[5:])
        avg_model2 = sum(time_model2_list[5:]) / len(time_model2_list[5:])
        avg_total = sum(time_total_list[5:]) / len(time_total_list[5:])
        print(f"syn avg model1 time: {avg_model1:.5f}, model2 time: {avg_model2:.5f}, total time: {avg_total:.5f}")


def cal_time3(model1, model2, x):
    # Event-based timing: record() does not block the Host, so
    # do_pure_cpu_task() overlaps with model1's GPU execution
    with torch.inference_mode():
        model1_start_event = torch.cuda.Event(enable_timing=True)
        model1_end_event = torch.cuda.Event(enable_timing=True)
        model2_start_event = torch.cuda.Event(enable_timing=True)
        model2_end_event = torch.cuda.Event(enable_timing=True)

        time_total_list = []
        time_model1_list = []
        time_model2_list = []

        for _ in range(50):
            model1_start_event.record()
            ret = model1(x)
            model1_end_event.record()

            do_pure_cpu_task()

            model2_start_event.record()
            ret = model2(ret)
            model2_end_event.record()
            model2_end_event.synchronize()  # the only Host-side wait per iteration

            time_model1_list.append(model1_start_event.elapsed_time(model1_end_event) / 1000)
            time_model2_list.append(model2_start_event.elapsed_time(model2_end_event) / 1000)
            time_total_list.append(model1_start_event.elapsed_time(model2_end_event) / 1000)

        avg_model1 = sum(time_model1_list[5:]) / len(time_model1_list[5:])
        avg_model2 = sum(time_model2_list[5:]) / len(time_model2_list[5:])
        avg_total = sum(time_total_list[5:]) / len(time_total_list[5:])
        print(f"event avg model1 time: {avg_model1:.5f}, model2 time: {avg_model2:.5f}, total time: {avg_total:.5f}")


def main():
    device = torch.device("cuda:0")
    model1 = CustomModel1().eval().to(device)
    model2 = CustomModel2().eval().to(device)

    x = torch.randn(size=(32, 3, 224, 224), device=device)
    cal_time2(model1, model2, x)
    cal_time3(model1, model2, x)


if __name__ == '__main__':
    main()

Terminal output:

syn avg model1 time: 0.04725, model2 time: 0.00125, total time: 0.05707
event avg model1 time: 0.04697, model2 time: 0.00099, total time: 0.04797

The output shows that the inference times measured for model1 and model2 are essentially the same whichever method is used, but with the synchronization function the total pipeline time is clearly longer. The figure below illustrates why: after dispatching model1's commands, the synchronization function blocks the Host until the Device finishes computing, during which the Host can do nothing else. With the Event method, recording after model1's dispatch does not block the Host, which can immediately move on to the subsequent CPU task.

[Figure: pipeline timeline with synchronize (Host blocked waiting) vs. Event (CPU task overlaps with GPU compute)]
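The non-blocking property can be pushed further: torch.cuda.Event.query() lets the Host check for completion without blocking at all. A minimal sketch, assuming model1 and x from the example above:

done = torch.cuda.Event()   # enable_timing is not needed just for polling

ret = model1(x)             # enqueue the GPU work
done.record()               # mark its end on the current stream

while not done.query():     # query() returns immediately instead of blocking
    pass                    # the Host is free to do other CPU work here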
