triton.Config({'BLOCK_SIZE_M': 128, 'BLOCK_SIZE_N': 256, 'BLOCK_SIZE_K': 64, 'GROUP_SIZE_M': 8}, num_stages=3, num_warps=8),
triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_N': 256, 'BLOCK_SIZE_K': 32, 'GROUP_SIZE_M': 8}, num_stages=4, num_warps=4),
...
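On their own these Config objects do nothing; they take effect once handed to triton.autotune, which benchmarks each candidate and caches the winner per key. A minimal sketch of the wiring, assuming the tutorial's matmul_kernel signature (the kernel body is elided):

import triton
import triton.language as tl

@triton.autotune(
    configs=[
        triton.Config({'BLOCK_SIZE_M': 128, 'BLOCK_SIZE_N': 256, 'BLOCK_SIZE_K': 64, 'GROUP_SIZE_M': 8},
                      num_stages=3, num_warps=8),
        triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_N': 256, 'BLOCK_SIZE_K': 32, 'GROUP_SIZE_M': 8},
                      num_stages=4, num_warps=4),
        # ... remaining configs from the list above ...
    ],
    key=['M', 'N', 'K'],  # re-run the search whenever any of these sizes changes
)
@triton.jit
def matmul_kernel(a_ptr, b_ptr, c_ptr, M, N, K,
                  stride_am, stride_ak, stride_bk, stride_bn, stride_cm, stride_cn,
                  BLOCK_SIZE_M: tl.constexpr, BLOCK_SIZE_N: tl.constexpr,
                  BLOCK_SIZE_K: tl.constexpr, GROUP_SIZE_M: tl.constexpr):
    ...  # body as in the tutorial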
GROUP_SIZE_M = 64
if N <= 8192: GROUP_SIZE_M = 96
if N <= 4096: GROUP_SIZE_M = 128
if N <= 1024: GROUP_SIZE_M = 256
# allocate output
locks = torch.zeros(2 * GROUP_SIZE_M, dtype=torch.int32, device=w.device)
_dw = torch.zeros((GROUP_SIZE_M, N), dtype=x.dtype, device=w.device)
triton.Config({'BLOCK_SIZE_M': 128, 'BLOCK_SIZE_N': 128, 'BLOCK_SIZE_K': 32, 'GROUP_SIZE_M': 8}, num_stages=4, num_warps=4),
triton.Config({'BLOCK_SIZE_M': 128, 'BLOCK_SIZE_N': 64, 'BLOCK_SIZE_K': 32, 'GROUP_SIZE_M': 8}, num_stages=4, num_warps=4),
triton.Config({'BLOCK_SIZE_M': 64, ...
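At the call site, the launch grid is usually written as a lambda over the metaparameters, so it automatically tracks whichever config the autotuner selects. A sketch, reusing the matmul_kernel sketched earlier (the pointers-sizes-strides argument order follows the tutorial's usual layout):

# The grid is derived from the META dict of the selected config, so the number
# of launched programs always matches the chosen tile sizes.
grid = lambda META: (triton.cdiv(M, META['BLOCK_SIZE_M']) * triton.cdiv(N, META['BLOCK_SIZE_N']), )
matmul_kernel[grid](
    a, b, c, M, N, K,
    a.stride(0), a.stride(1),
    b.stride(0), b.stride(1),
    c.stride(0), c.stride(1),
)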
locks = torch.zeros(2 * GROUP_SIZE_M, dtype=torch.int32, device=w.device)
_dw = torch.zeros((GROUP_SIZE_M, N), dtype=x.dtype, device=w.device)
_db = torch.zeros((GROUP_SIZE_M, N), dtype=x.dtype, device=w.device)
dw = torch.empty((N, ), dtype=w.dtype, device=w.device)
db = torch.empty((N, ), dtype=w.dtype, device=w.device)
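The shapes make the strategy visible: each of the GROUP_SIZE_M buffer rows accumulates partial gradients from a stripe of input rows (the locks serialize writers that land on the same buffer row), and a second kernel then collapses the buffers into the final (N,) vectors. Functionally, that last step computes the equivalent of the following PyTorch two-liner, shown only to make the reduction concrete; the tutorial performs it in a Triton kernel:

# Equivalent of the final reduction kernel: sum the GROUP_SIZE_M partial rows
# into the (N,)-shaped gradients. _dw and _db are the buffers allocated above.
dw = _dw.sum(dim=0).to(w.dtype)
db = _db.sum(dim=0).to(w.dtype)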
group_size_m = min(num_pid_m - first_pid_m, GROUP_SIZE_M)
pid_m = first_pid_m + (pid % group_size_m)
pid_n = (pid % num_pid_in_group) // group_size_m
# ----------------------------------------------------------
# Create pointers for the first blocks of A and B.
# We will advance this pointer as we move in the K direction ...
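This index arithmetic is easier to trust once you print it. The loop below replays the same math on the host for a toy grid (pure Python, no GPU needed; the sizes are made up for illustration):

# Replay the grouped-ordering swizzle for a 4x4 grid of C tiles with
# GROUP_SIZE_M = 2, printing which (pid_m, pid_n) each pid lands on.
num_pid_m, num_pid_n, GROUP_SIZE_M = 4, 4, 2
num_pid_in_group = GROUP_SIZE_M * num_pid_n
for pid in range(num_pid_m * num_pid_n):
    group_id = pid // num_pid_in_group
    first_pid_m = group_id * GROUP_SIZE_M
    group_size_m = min(num_pid_m - first_pid_m, GROUP_SIZE_M)
    pid_m = first_pid_m + (pid % group_size_m)
    pid_n = (pid % num_pid_in_group) // group_size_m
    print(f"pid={pid:2d} -> tile ({pid_m}, {pid_n})")
# Consecutive pids sweep a 2-tile-row band of C column by column, so the same
# blocks of A stay hot in cache across successive programs.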
# device tensor of gemm sizes. its shape is [group_size, 3]
# dim 0 is group_size, dim 1 is the values of <M, N, K> of each gemm
group_gemm_sizes,
# device tensor of leading dimension sizes. its shape is [group_size, 3]
...
# Number of program ids along the M axis
num_pid_m = tl.cdiv(M, BLOCK_SIZE_M)
# Number of program ids along the N axis
num_pid_n = tl.cdiv(N, BLOCK_SIZE_N)
# Number of programs in group
num_pid_in_group = GROUP_SIZE_M * num_pid_n
# Id of the group this program is in
group_id = pid // num_pid_in_group
In the example above, A(9, 9) @ B(9, 9) = C(9, 9), BLOCK_SIZE_M, BLOCK_SIZE_N, and BLOCK_SIZE_K were all set to 3, and GROUP_SIZE_M was in fact set to 1, which is too degenerate a case to show the grouping. So let us pick an example whose numbers make the grouping visible (worked through in the sketch below): A(2048, 2048) @ B(2048, 2048) = C(2048, 2048), where we assume in advance that ...
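Since the sentence above is cut off before the block sizes are stated, the walkthrough below simply assumes BLOCK_SIZE_M = BLOCK_SIZE_N = 128 and GROUP_SIZE_M = 8; these are assumed values, chosen to match the first config in the autotune list, not taken from the truncated text:

# Grid arithmetic for the 2048 x 2048 example under the assumed tile sizes.
M = N = 2048
BLOCK_SIZE_M = BLOCK_SIZE_N = 128
GROUP_SIZE_M = 8

num_pid_m = (M + BLOCK_SIZE_M - 1) // BLOCK_SIZE_M            # 16 tile-rows of C
num_pid_n = (N + BLOCK_SIZE_N - 1) // BLOCK_SIZE_N            # 16 tile-columns of C
num_pid_in_group = GROUP_SIZE_M * num_pid_n                   # 128 programs per group
num_groups = (num_pid_m + GROUP_SIZE_M - 1) // GROUP_SIZE_M   # 2 groups
print(num_pid_m, num_pid_n, num_pid_in_group, num_groups)     # 16 16 128 2
# Programs 0..127 cover the top band of 8 tile-rows, programs 128..255 the
# bottom band, each band traversed column by column.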
group_a_ptrs,
group_b_ptrs,
group_c_ptrs,
# device tensor of gemm sizes. its shape is [group_size, 3]
# dim 0 is group_size, dim 1 is the values of <M, N, K> of each gemm
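These arguments are plain device tensors built on the host. A sketch of how they can be assembled for lists of row-major (A, B, C) matrices; the helper name and exact dtypes are mine, but packing data_ptr() values into an int64 tensor is the standard group-GEMM setup:

import torch

def pack_group_args(group_A, group_B, group_C, device="cuda"):
    # One row per GEMM: raw CUDA pointers, problem sizes, and leading dimensions.
    a_ptrs = torch.tensor([a.data_ptr() for a in group_A], dtype=torch.int64, device=device)
    b_ptrs = torch.tensor([b.data_ptr() for b in group_B], dtype=torch.int64, device=device)
    c_ptrs = torch.tensor([c.data_ptr() for c in group_C], dtype=torch.int64, device=device)
    sizes = torch.tensor([[a.shape[0], b.shape[1], a.shape[1]]        # <M, N, K>
                          for a, b in zip(group_A, group_B)],
                         dtype=torch.int32, device=device)
    lds = torch.tensor([[a.stride(0), b.stride(0), c.stride(0)]       # <lda, ldb, ldc>
                        for a, b, c in zip(group_A, group_B, group_C)],
                       dtype=torch.int32, device=device)
    return a_ptrs, b_ptrs, c_ptrs, sizes, lds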
                             GROUP_SIZE_M: tl.constexpr, BLOCK_SIZE_N: tl.constexpr):
    # Map the program id to the elements of X, DX, and DY it should compute.
    row = tl.program_id(0)
    cols = tl.arange(0, BLOCK_SIZE_N)
    mask = cols < N
    ...
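One consequence of this one-program-per-row layout: with M rows but only GROUP_SIZE_M partial buffers, several programs share a buffer row, which is exactly why the locks tensor exists. A host-side sketch of the striping (the modulo assignment follows the lock_id = row % GROUP_SIZE_M convention used in the tutorial kernel; the toy sizes are illustrative):

# Which partial-gradient buffer row (and lock) each input row maps to,
# for a toy M = 10, GROUP_SIZE_M = 4. Rows that share a lock_id must
# serialize their accumulation into _dw[lock_id] / _db[lock_id].
M, GROUP_SIZE_M = 10, 4
for row in range(M):
    lock_id = row % GROUP_SIZE_M
    print(f"row {row} -> buffer/lock {lock_id}")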