From 8d4b4f51fce3aeb780310eff9c3ac4f4bd619fe8 Mon Sep 17 00:00:00 2001
From: Pradeep Kumar
Date: Tue, 1 Jul 2025 07:20:14 +0000
Subject: [PATCH] [NVVM][NVPTX] Add support for tcgen05.mma

This commit adds support for the tcgen05.mma family of instructions in NVPTX, with tests under CodeGen/NVPTX/tcgen05-mma*. The tcgen05.mma instructions are modeled as intrinsics with multiple arguments that encode the cta_group, MMA kind, collector usage, etc. The rationale for the design is documented in NVPTXUsage.rst.

---
 llvm/docs/NVPTXUsage.rst                      | 388 +++++++++-
 llvm/include/llvm/IR/IntrinsicsNVVM.td        | 430 ++++++++++-
 llvm/include/llvm/IR/NVVMIntrinsicUtils.h     |   9 +
 llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp   | 313 ++++++++
 llvm/lib/Target/NVPTX/NVPTXISelLowering.h     |  39 +-
 llvm/lib/Target/NVPTX/NVPTXIntrinsics.td      | 482 ++++++++++++-
 llvm/lib/Target/NVPTX/NVPTXSubtarget.h        |   2 +-
 .../NVPTX/tcgen05-mma-block-scale-ptx88.ll    | 526 ++++++++++++++
 .../CodeGen/NVPTX/tcgen05-mma-block-scale.ll  | 291 ++++++++
 .../NVPTX/tcgen05-mma-disable-output-lane.ll  | 681 ++++++++++++++++++
 .../test/CodeGen/NVPTX/tcgen05-mma-scale-d.ll | 412 +++++++++++
 llvm/test/CodeGen/NVPTX/tcgen05-mma-ws.ll     | 569 +++++++++++++++
 llvm/test/CodeGen/NVPTX/tcgen05-mma.ll        | 601 ++++++++++++++++
 13 files changed, 4733 insertions(+), 10 deletions(-)
 create mode 100644 llvm/test/CodeGen/NVPTX/tcgen05-mma-block-scale-ptx88.ll
 create mode 100644 llvm/test/CodeGen/NVPTX/tcgen05-mma-block-scale.ll
 create mode 100644 llvm/test/CodeGen/NVPTX/tcgen05-mma-disable-output-lane.ll
 create mode 100644 llvm/test/CodeGen/NVPTX/tcgen05-mma-scale-d.ll
 create mode 100644 llvm/test/CodeGen/NVPTX/tcgen05-mma-ws.ll
 create mode 100644 llvm/test/CodeGen/NVPTX/tcgen05-mma.ll

diff --git a/llvm/docs/NVPTXUsage.rst b/llvm/docs/NVPTXUsage.rst
index d28eb6860c33a..1b61df2cf5254 100644
--- a/llvm/docs/NVPTXUsage.rst
+++ b/llvm/docs/NVPTXUsage.rst
@@ -1945,6 +1945,388 @@ The last argument `i1 %unpack` is a compile-time constant which when set, indica
 
 For more information, refer to the `PTX ISA `__.
 
+tcgen05.mma Intrinsics
+----------------------
+
+One of the key instructions introduced in the Blackwell architecture is the tcgen05.mma family, which carries out matrix multiply-accumulate operations using the 5th generation Tensor Core unit. The `tcgen05.mma` instruction supports a broad range of capabilities, including sparsity, block scaling, and weight-stationary convolutions. Accurately modeling these through intrinsics is highly complex, and the following table outlines the large number of intrinsics required to fully support the tcgen05.mma instruction set.
+ ++------------------------------------+---------------------------------------------------------------------------------------------------+----------------+ +| variant | Configuration | Total Variants | ++====================================+===================================================================================================+================+ +| tcgen05.mma.shared | 2 (space) x 2 (sp) x 4 (kind) x 2 (cta_group) x 4 (collector_usage) | 128 | ++------------------------------------+---------------------------------------------------------------------------------------------------+----------------+ +| tcgen05.mma.tensor.ashift | 2 (sp) x 4 (kind) x 2 (cta_group) x 2 (collector_usage) | 32 | ++------------------------------------+---------------------------------------------------------------------------------------------------+----------------+ +| tcgen05.mma.scale_d | 2 (space) x 2 (sp) x 2 (kind) x 2 (cta_group) x 4 (collector_usage) | 128 | ++------------------------------------+---------------------------------------------------------------------------------------------------+----------------+ +| tcgen05.mma.scale_d.tensor.ashift | 2 (sp) x 2 (kind) x 2 (cta_group) x 2 (collector_usage) | 16 | ++------------------------------------+---------------------------------------------------------------------------------------------------+----------------+ +| tcgen05.mma.disable_output_lane | 2 (space) x 2 (sp) x 4 (kind) x 2 (cta_group) x 4 (collector_usage) | 128 | ++------------------------------------+---------------------------------------------------------------------------------------------------+----------------+ +| tcgen05.mma.disable_output_lane... | 2 (sp) x 4 (kind) x 2 (cta_group) x 2 (collector_usage) | 32 | ++------------------------------------+---------------------------------------------------------------------------------------------------+----------------+ +| tcgen05.mma.block_scale | 2 (space) x 1 (mxf4nvf4) x 2 (cta_group) x 2 (scale_vec_size) x 4 (collector_usage) | 32 | ++------------------------------------+---------------------------------------------------------------------------------------------------+----------------+ +| tcgen05.mma.block_scale | 2 (space) x 1 (mxf4) x 2 (cta_group) x 2 (scale_vec_size) x 4 (collector_usage) | 32 | ++------------------------------------+---------------------------------------------------------------------------------------------------+----------------+ +| tcgen05.mma.block_scale | 2 (space) x 1 (mxf8f6f4) x 2 (cta_group) x 2 (scale_vec_size) x 4 (collector_usage) | 32 | ++------------------------------------+---------------------------------------------------------------------------------------------------+----------------+ +| tcgen05.mma.ws | 2 (space) x 2 (sp) x 4 (kind) x 2 (zero_col_mask) x 4 (collector_usage_op) x 4 (collector_buffer) | 256 | ++------------------------------------+---------------------------------------------------------------------------------------------------+----------------+ +| Total | | 816 | ++------------------------------------+---------------------------------------------------------------------------------------------------+----------------+ + +To reduce the number of possible intrinsic variations, we've modeled the tcgen05.mma instructions using flag operands. We've added range checks to these flags to prevent invalid values. We also expanded some flags back into intrinsic modifiers to avoid supporting invalid combinations of features. 
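+
+As a hedged illustration of this flag-based scheme (the full signatures appear in the sections below), the two calls here select different MMA kinds, cta_groups, and collector usages purely through immediate flag operands of one intrinsic, rather than through distinct intrinsic names; `%d`, `%adesc`, `%bdesc`, and `%idesc` are placeholder descriptor operands:
+
+.. code-block:: llvm
+
+  ; kind = F16 (0), cta_group = CG1 (1), collector_usage = DISCARD (0)
+  call void @llvm.nvvm.tcgen05.mma.shared(ptr addrspace(6) %d, i64 %adesc, i64 %bdesc, i32 %idesc, i1 true, i32 0, i32 1, i32 0)
+  ; kind = TF32 (1), cta_group = CG2 (2), collector_usage = LASTUSE (1)
+  call void @llvm.nvvm.tcgen05.mma.shared(ptr addrspace(6) %d, i64 %adesc, i64 %bdesc, i32 %idesc, i1 true, i32 1, i32 2, i32 1)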
+
+'``llvm.nvvm.tcgen05.mma.*``'
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Syntax:
+"""""""
+
+.. code-block:: llvm
+
+  declare void @llvm.nvvm.tcgen05.mma.shared(ptr addrspace(6) %d, i64 %a, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 %kind_flag, i32 %cta_group_flag, i32 %collector_usage_a_op_flag)
+  declare void @llvm.nvvm.tcgen05.mma.tensor(ptr addrspace(6) %d, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 %kind_flag, i32 %cta_group_flag, i32 %collector_usage_a_op_flag)
+  declare void @llvm.nvvm.tcgen05.mma.tensor.ashift(ptr addrspace(6) %d, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 %kind_flag, i32 %cta_group_flag, i32 %collector_usage_a_op_flag)
+
+  ; .sp variants
+  declare void @llvm.nvvm.tcgen05.mma.sp.shared(ptr addrspace(6) %d, i64 %a, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, i32 %kind_flag, i32 %cta_group_flag, i32 %collector_usage_a_op_flag)
+  declare void @llvm.nvvm.tcgen05.mma.sp.tensor(ptr addrspace(6) %d, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, i32 %kind_flag, i32 %cta_group_flag, i32 %collector_usage_a_op_flag)
+  declare void @llvm.nvvm.tcgen05.mma.sp.tensor.ashift(ptr addrspace(6) %d, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, i32 %kind_flag, i32 %cta_group_flag, i32 %collector_usage_a_op_flag)
+
+  ; .scale_d variants
+  declare void @llvm.nvvm.tcgen05.mma.shared.f16.scale_d(ptr addrspace(6) %d, i64 %a, i64 %b, i32 %idesc, i1 %enable_inp_d, i64 %scale_d_imm, i32 %cta_group_flag, i32 %collector_usage_a_op_flag)
+  declare void @llvm.nvvm.tcgen05.mma.shared.tf32.scale_d(ptr addrspace(6) %d, i64 %a, i64 %b, i32 %idesc, i1 %enable_inp_d, i64 %scale_d_imm, i32 %cta_group_flag, i32 %collector_usage_a_op_flag)
+  declare void @llvm.nvvm.tcgen05.mma.tensor.f16.scale_d(ptr addrspace(6) %d, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, i64 %scale_d_imm, i32 %cta_group_flag, i32 %collector_usage_a_op_flag)
+  declare void @llvm.nvvm.tcgen05.mma.tensor.f16.scale_d.ashift(ptr addrspace(6) %d, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, i64 %scale_d_imm, i32 %cta_group_flag, i32 %collector_usage_a_op_flag)
+  declare void @llvm.nvvm.tcgen05.mma.tensor.tf32.scale_d(ptr addrspace(6) %d, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, i64 %scale_d_imm, i32 %cta_group_flag, i32 %collector_usage_a_op_flag)
+  declare void @llvm.nvvm.tcgen05.mma.tensor.tf32.scale_d.ashift(ptr addrspace(6) %d, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, i64 %scale_d_imm, i32 %cta_group_flag, i32 %collector_usage_a_op_flag)
+
+  ; .sp.scale_d variants
+  declare void @llvm.nvvm.tcgen05.mma.sp.shared.f16.scale_d(ptr addrspace(6) %d, i64 %a, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, i64 %scale_d_imm, i32 %cta_group_flag, i32 %collector_usage_a_op_flag)
+  declare void @llvm.nvvm.tcgen05.mma.sp.shared.tf32.scale_d(ptr addrspace(6) %d, i64 %a, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, i64 %scale_d_imm, i32 %cta_group_flag, i32 %collector_usage_a_op_flag)
+  declare void @llvm.nvvm.tcgen05.mma.sp.tensor.f16.scale_d(ptr addrspace(6) %d, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, i64 %scale_d_imm, i32 %cta_group_flag, i32 %collector_usage_a_op_flag)
+  declare void @llvm.nvvm.tcgen05.mma.sp.tensor.f16.scale_d.ashift(ptr addrspace(6) %d, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, i64 %scale_d_imm, i32 %cta_group_flag, i32 %collector_usage_a_op_flag)
+  declare void @llvm.nvvm.tcgen05.mma.sp.tensor.tf32.scale_d(ptr addrspace(6) %d, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, i64 %scale_d_imm, i32 %cta_group_flag, i32 %collector_usage_a_op_flag)
+  declare void @llvm.nvvm.tcgen05.mma.sp.tensor.tf32.scale_d.ashift(ptr addrspace(6) %d, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, i64 %scale_d_imm, i32 %cta_group_flag, i32 %collector_usage_a_op_flag)
+
+Overview:
+"""""""""
+
+`nvvm.tcgen05.mma` is an asynchronous intrinsic which initiates an `MxNxK` matrix multiply and accumulate operation, `D = A * B + D`, where the `A` matrix is `M x K`, the `B` matrix is `K x N`, and the `D` matrix is `M x N`. The operation of the form `D = A * B` is issued when the input predicate argument `%enable_inp_d` is false. The immediate argument `%scale_d_imm`, present in the `.scale_d` variants, scales the input matrix `D` as follows: `D = A * B + D * (2 ^ - %scale_d_imm)`. The valid range of values for the argument `%scale_d_imm` is `[0, 15]`. The 32-bit register operand `%idesc` is the instruction descriptor as described in `Instruction descriptor `__
+
+`nvvm.tcgen05.mma` has single-thread semantics, unlike the collective `nvvm.mma.sync` intrinsics or the PTX `wgmma.mma_async` instruction. So, a single thread issuing `nvvm.tcgen05.mma` initiates the whole matrix multiply and accumulate operation.
+
+When `.sp` is specified, the dimension of the `A` matrix is `M x (K/2)`, and an additional `%spmetadata` argument is required.
+
+`.ashift` shifts the rows of the `A` matrix down by one row, except for the last row in the Tensor Memory. `.ashift` is only allowed with M = 128 or M = 256.
+
+The `%collector_usage_a_op_flag` flag specifies the usage of the collector buffer for matrix `A`. It is illegal to specify either `USE` or `FILL` for `%collector_usage_a_op_flag` together with `.ashift`.
+
+For more information, refer to the
+`PTX ISA `__
+
+The following tables describe the possible values of the flag arguments:
+
+`%kind_flag` flag:
+
+============= ==========
+ `kind_flag`   value
+============= ==========
+ F16            0
+ TF32           1
+ F8F6F4         2
+ I8             3
+============= ==========
+
+`%cta_group_flag` flag:
+
+============= ==========
+ `cta_group`   value
+============= ==========
+ CG1            1
+ CG2            2
+============= ==========
+
+`%collector_usage_a_op_flag` flag:
+
+============================= ==========
+ `collector_usage_a_op_flag`   value
+============================= ==========
+ DISCARD                        0
+ LASTUSE                        1
+ USE                            2
+ FILL                           3
+============================= ==========
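+
+As a hedged usage sketch, the following call issues a dense `f16` MMA whose accumulator input is scaled by `2^-3` (`%d`, `%atensor`, `%bdesc`, and `%idesc` are placeholder operands):
+
+.. code-block:: llvm
+
+  ; D = A * B + D * (2 ^ -3); the kind (f16) is encoded in the name,
+  ; cta_group = CG1 (1), collector_usage = DISCARD (0)
+  call void @llvm.nvvm.tcgen05.mma.tensor.f16.scale_d(ptr addrspace(6) %d, ptr addrspace(6) %atensor, i64 %bdesc, i32 %idesc, i1 true, i64 3, i32 1, i32 0)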
+'``llvm.nvvm.tcgen05.mma.block_scale*``'
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Syntax:
+"""""""
+
+.. code-block:: llvm
+
+  ; mxf8f6f4
+  declare void @llvm.nvvm.tcgen05.mma.shared.mxf8f6f4.block_scale(ptr addrspace(6) %d, i64 %a, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 %cta_group_flag, i32 %collector_usage_a_op_flag)
+  declare void @llvm.nvvm.tcgen05.mma.tensor.mxf8f6f4.block_scale(ptr addrspace(6) %d, ptr addrspace(6) %a, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 %cta_group_flag, i32 %collector_usage_a_op_flag)
+  declare void @llvm.nvvm.tcgen05.mma.shared.mxf8f6f4.block32.block_scale(ptr addrspace(6) %d, i64 %a, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 %cta_group_flag, i32 %collector_usage_a_op_flag)
+  declare void @llvm.nvvm.tcgen05.mma.tensor.mxf8f6f4.block32.block_scale(ptr addrspace(6) %d, ptr addrspace(6) %a, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 %cta_group_flag, i32 %collector_usage_a_op_flag)
+  declare void @llvm.nvvm.tcgen05.mma.sp.shared.mxf8f6f4.block_scale(ptr addrspace(6) %d, i64 %a, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 %cta_group_flag, i32 %collector_usage_a_op_flag)
+  declare void @llvm.nvvm.tcgen05.mma.sp.tensor.mxf8f6f4.block_scale(ptr addrspace(6) %d, ptr addrspace(6) %a, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 %cta_group_flag, i32 %collector_usage_a_op_flag)
+  declare void @llvm.nvvm.tcgen05.mma.sp.shared.mxf8f6f4.block32.block_scale(ptr addrspace(6) %d, i64 %a, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 %cta_group_flag, i32 %collector_usage_a_op_flag)
+  declare void @llvm.nvvm.tcgen05.mma.sp.tensor.mxf8f6f4.block32.block_scale(ptr addrspace(6) %d, ptr addrspace(6) %a, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 %cta_group_flag, i32 %collector_usage_a_op_flag)
+
+  ; mxf4
+  declare void @llvm.nvvm.tcgen05.mma.shared.mxf4.block_scale(ptr addrspace(6) %d, i64 %a, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 %cta_group_flag, i32 %collector_usage_a_op_flag)
+  declare void @llvm.nvvm.tcgen05.mma.tensor.mxf4.block_scale(ptr addrspace(6) %d, ptr addrspace(6) %a, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 %cta_group_flag, i32 %collector_usage_a_op_flag)
+  declare void @llvm.nvvm.tcgen05.mma.shared.mxf4.block32.block_scale(ptr addrspace(6) %d, i64 %a, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 %cta_group_flag, i32 %collector_usage_a_op_flag)
+  declare void @llvm.nvvm.tcgen05.mma.tensor.mxf4.block32.block_scale(ptr addrspace(6) %d, ptr addrspace(6) %a, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 %cta_group_flag, i32 %collector_usage_a_op_flag)
+  declare void @llvm.nvvm.tcgen05.mma.sp.shared.mxf4.block_scale(ptr addrspace(6) %d, i64 %a, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 %cta_group_flag, i32 %collector_usage_a_op_flag)
+  declare void @llvm.nvvm.tcgen05.mma.sp.tensor.mxf4.block_scale(ptr addrspace(6) %d, ptr addrspace(6) %a, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 %cta_group_flag, i32 %collector_usage_a_op_flag)
+  declare void @llvm.nvvm.tcgen05.mma.sp.shared.mxf4.block32.block_scale(ptr addrspace(6) %d, i64 %a, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 %cta_group_flag, i32 %collector_usage_a_op_flag)
+  declare void @llvm.nvvm.tcgen05.mma.sp.tensor.mxf4.block32.block_scale(ptr addrspace(6) %d, ptr addrspace(6) %a, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 %cta_group_flag, i32 %collector_usage_a_op_flag)
+
+  ; mxf4nvf4
+  declare void @llvm.nvvm.tcgen05.mma.shared.mxf4nvf4.block16.block_scale(ptr addrspace(6) %d, i64 %a, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 %cta_group_flag, i32 %collector_usage_a_op_flag)
+  declare void @llvm.nvvm.tcgen05.mma.tensor.mxf4nvf4.block16.block_scale(ptr addrspace(6) %d, ptr addrspace(6) %a, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 %cta_group_flag, i32 %collector_usage_a_op_flag)
+  declare void @llvm.nvvm.tcgen05.mma.shared.mxf4nvf4.block32.block_scale(ptr addrspace(6) %d, i64 %a, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 %cta_group_flag, i32 %collector_usage_a_op_flag)
+  declare void @llvm.nvvm.tcgen05.mma.tensor.mxf4nvf4.block32.block_scale(ptr addrspace(6) %d, ptr addrspace(6) %a, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 %cta_group_flag, i32 %collector_usage_a_op_flag)
+  declare void @llvm.nvvm.tcgen05.mma.sp.shared.mxf4nvf4.block16.block_scale(ptr addrspace(6) %d, i64 %a, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 %cta_group_flag, i32 %collector_usage_a_op_flag)
+  declare void @llvm.nvvm.tcgen05.mma.sp.tensor.mxf4nvf4.block16.block_scale(ptr addrspace(6) %d, ptr addrspace(6) %a, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 %cta_group_flag, i32 %collector_usage_a_op_flag)
+  declare void @llvm.nvvm.tcgen05.mma.sp.shared.mxf4nvf4.block32.block_scale(ptr addrspace(6) %d, i64 %a, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 %cta_group_flag, i32 %collector_usage_a_op_flag)
+  declare void @llvm.nvvm.tcgen05.mma.sp.tensor.mxf4nvf4.block32.block_scale(ptr addrspace(6) %d, ptr addrspace(6) %a, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 %cta_group_flag, i32 %collector_usage_a_op_flag)
+
+Overview:
+"""""""""
+
+`nvvm.tcgen05.mma.block_scale` is an asynchronous intrinsic which initiates an `MxNxK` matrix multiply and accumulate operation, `D = (A * scale_a) * (B * scale_b) + D`, where the `A` matrix is `M x K`, the `B` matrix is `K x N`, and the `D` matrix is `M x N`. The matrices `A` and `B` are scaled with the `%scale_a` and `%scale_b` matrices respectively before performing the matrix multiply and accumulate operation. The operation of the form `D = A * B` is issued when the input predicate argument `%enable_inp_d` is false. The 32-bit register operand `%idesc` is the instruction descriptor as described in `Instruction descriptor `__
+
+`nvvm.tcgen05.mma.block_scale` has single-thread semantics, unlike the collective `nvvm.mma.sync` intrinsics or the PTX `wgmma.mma_async` instruction. So, a single thread issuing `nvvm.tcgen05.mma.block_scale` initiates the whole matrix multiply and accumulate operation.
+
+When `.sp` is specified, the dimension of the `A` matrix is `M x (K/2)`, and an additional `%spmetadata` argument is required.
+
+The matrix multiplication kind (`mxf8f6f4`, `mxf4`, `mxf4nvf4`) and the scale vector size (`block16`, `block32`) are encoded in the intrinsic name; the remaining configuration is selected through the flag arguments below.
+
+The `%collector_usage_a_op_flag` flag specifies the usage of the collector buffer for matrix `A`.
+
+For more information, refer to the
+`PTX ISA `__
+
+The following tables describe the possible values of the flag arguments:
+
+`%cta_group_flag` flag:
+
+============= ==========
+ `cta_group`   value
+============= ==========
+ CG1            1
+ CG2            2
+============= ==========
+
+`%collector_usage_a_op_flag` flag:
+
+============================= ==========
+ `collector_usage_a_op_flag`   value
+============================= ==========
+ DISCARD                        0
+ LASTUSE                        1
+ USE                            2
+ FILL                           3
+============================= ==========
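+
+As a hedged usage sketch (operands are placeholders), a single `mxf8f6f4` block-scaled MMA:
+
+.. code-block:: llvm
+
+  ; D = (A * scale_a) * (B * scale_b) + D; the kind (mxf8f6f4) is encoded
+  ; in the name, cta_group = CG1 (1), collector_usage = DISCARD (0)
+  call void @llvm.nvvm.tcgen05.mma.tensor.mxf8f6f4.block_scale(ptr addrspace(6) %d, ptr addrspace(6) %a, i64 %bdesc, i32 %idesc, i1 true, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 1, i32 0)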
+'``llvm.nvvm.tcgen05.mma.disable_output_lane*``'
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Syntax:
+"""""""
+
+.. code-block:: llvm
+
+  declare void @llvm.nvvm.tcgen05.mma.shared.disable_output_lane.cg1(ptr addrspace(6) %d, i64 %a, i64 %b, i32 %idesc, i1 %enable_inp_d, <4 x i32> %disable_output_lane_v4, i32 %kind_flag, i32 %collector_usage_a_op_flag)
+  declare void @llvm.nvvm.tcgen05.mma.shared.disable_output_lane.cg2(ptr addrspace(6) %d, i64 %a, i64 %b, i32 %idesc, i1 %enable_inp_d, <8 x i32> %disable_output_lane_v8, i32 %kind_flag, i32 %collector_usage_a_op_flag)
+  declare void @llvm.nvvm.tcgen05.mma.tensor.disable_output_lane.cg1(ptr addrspace(6) %d, ptr addrspace(6) %a, i64 %b, i32 %idesc, i1 %enable_inp_d, <4 x i32> %disable_output_lane_v4, i32 %kind_flag, i32 %collector_usage_a_op_flag)
+  declare void @llvm.nvvm.tcgen05.mma.tensor.disable_output_lane.cg1.ashift(ptr addrspace(6) %d, ptr addrspace(6) %a, i64 %b, i32 %idesc, i1 %enable_inp_d, <4 x i32> %disable_output_lane_v4, i32 %kind_flag, i32 %collector_usage_a_op_flag)
+  declare void @llvm.nvvm.tcgen05.mma.tensor.disable_output_lane.cg2(ptr addrspace(6) %d, ptr addrspace(6) %a, i64 %b, i32 %idesc, i1 %enable_inp_d, <8 x i32> %disable_output_lane_v8, i32 %kind_flag, i32 %collector_usage_a_op_flag)
+  declare void @llvm.nvvm.tcgen05.mma.tensor.disable_output_lane.cg2.ashift(ptr addrspace(6) %d, ptr addrspace(6) %a, i64 %b, i32 %idesc, i1 %enable_inp_d, <8 x i32> %disable_output_lane_v8, i32 %kind_flag, i32 %collector_usage_a_op_flag)
+
+  ; .sp variants
+  declare void @llvm.nvvm.tcgen05.mma.sp.shared.disable_output_lane.cg1(ptr addrspace(6) %d, i64 %a, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, <4 x i32> %disable_output_lane_v4, i32 %kind_flag, i32 %collector_usage_a_op_flag)
+  declare void @llvm.nvvm.tcgen05.mma.sp.shared.disable_output_lane.cg2(ptr addrspace(6) %d, i64 %a, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, <8 x i32> %disable_output_lane_v8, i32 %kind_flag, i32 %collector_usage_a_op_flag)
+  declare void @llvm.nvvm.tcgen05.mma.sp.tensor.disable_output_lane.cg1(ptr addrspace(6) %d, ptr addrspace(6) %a, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, <4 x i32> %disable_output_lane_v4, i32 %kind_flag, i32 %collector_usage_a_op_flag)
+  declare void @llvm.nvvm.tcgen05.mma.sp.tensor.disable_output_lane.cg1.ashift(ptr addrspace(6) %d, ptr addrspace(6) %a, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, <4 x i32> %disable_output_lane_v4, i32 %kind_flag, i32 %collector_usage_a_op_flag)
+  declare void @llvm.nvvm.tcgen05.mma.sp.tensor.disable_output_lane.cg2(ptr addrspace(6) %d, ptr addrspace(6) %a, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, <8 x i32> %disable_output_lane_v8, i32 %kind_flag, i32 %collector_usage_a_op_flag)
+  declare void @llvm.nvvm.tcgen05.mma.sp.tensor.disable_output_lane.cg2.ashift(ptr addrspace(6) %d, ptr addrspace(6) %a, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, <8 x i32> %disable_output_lane_v8, i32 %kind_flag, i32 %collector_usage_a_op_flag)
+
+  ; .scale_d variants
+  declare void @llvm.nvvm.tcgen05.mma.shared.f16.scale_d.disable_output_lane.cg1(ptr addrspace(6) %d, i64 %a, i64 %b, i32 %idesc, i1 %enable_inp_d, i64 %scale_d_imm, <4 x i32> %disable_output_lane_v4, i32 %collector_usage_a_op_flag)
+  declare void @llvm.nvvm.tcgen05.mma.shared.f16.scale_d.disable_output_lane.cg2(ptr addrspace(6) %d, i64 %a, i64 %b, i32 %idesc, i1 %enable_inp_d, i64 %scale_d_imm, <8 x i32> %disable_output_lane_v8, i32 %collector_usage_a_op_flag)
+  declare void @llvm.nvvm.tcgen05.mma.shared.tf32.scale_d.disable_output_lane.cg1(ptr addrspace(6) %d, i64 %a, i64 %b, i32 %idesc, i1 %enable_inp_d, i64 %scale_d_imm, <4 x i32> %disable_output_lane_v4, i32 %collector_usage_a_op_flag)
+  declare void @llvm.nvvm.tcgen05.mma.shared.tf32.scale_d.disable_output_lane.cg2(ptr addrspace(6) %d, i64 %a, i64 %b, i32 %idesc, i1 %enable_inp_d, i64 %scale_d_imm, <8 x i32> %disable_output_lane_v8, i32 %collector_usage_a_op_flag)
+  declare void @llvm.nvvm.tcgen05.mma.tensor.f16.scale_d.disable_output_lane.cg1(ptr addrspace(6) %d, ptr addrspace(6) %a, i64 %b, i32 %idesc, i1 %enable_inp_d, i64 %scale_d_imm, <4 x i32> %disable_output_lane_v4, i32 %collector_usage_a_op_flag)
+  declare void @llvm.nvvm.tcgen05.mma.tensor.f16.scale_d.disable_output_lane.cg1.ashift(ptr addrspace(6) %d, ptr addrspace(6) %a, i64 %b, i32 %idesc, i1 %enable_inp_d, i64 %scale_d_imm, <4 x i32> %disable_output_lane_v4, i32 %collector_usage_a_op_flag)
+  declare void @llvm.nvvm.tcgen05.mma.tensor.f16.scale_d.disable_output_lane.cg2(ptr addrspace(6) %d, ptr addrspace(6) %a, i64 %b, i32 %idesc, i1 %enable_inp_d, i64 %scale_d_imm, <8 x i32> %disable_output_lane_v8, i32 %collector_usage_a_op_flag)
+  declare void @llvm.nvvm.tcgen05.mma.tensor.f16.scale_d.disable_output_lane.cg2.ashift(ptr addrspace(6) %d, ptr addrspace(6) %a, i64 %b, i32 %idesc, i1 %enable_inp_d, i64 %scale_d_imm, <8 x i32> %disable_output_lane_v8, i32 %collector_usage_a_op_flag)
+  declare void @llvm.nvvm.tcgen05.mma.tensor.tf32.scale_d.disable_output_lane.cg1(ptr addrspace(6) %d, ptr addrspace(6) %a, i64 %b, i32 %idesc, i1 %enable_inp_d, i64 %scale_d_imm, <4 x i32> %disable_output_lane_v4, i32 %collector_usage_a_op_flag)
+  declare void @llvm.nvvm.tcgen05.mma.tensor.tf32.scale_d.disable_output_lane.cg1.ashift(ptr addrspace(6) %d, ptr addrspace(6) %a, i64 %b, i32 %idesc, i1 %enable_inp_d, i64 %scale_d_imm, <4 x i32> %disable_output_lane_v4, i32 %collector_usage_a_op_flag)
+  declare void @llvm.nvvm.tcgen05.mma.tensor.tf32.scale_d.disable_output_lane.cg2(ptr addrspace(6) %d, ptr addrspace(6) %a, i64 %b, i32 %idesc, i1 %enable_inp_d, i64 %scale_d_imm, <8 x i32> %disable_output_lane_v8, i32 %collector_usage_a_op_flag)
+  declare void @llvm.nvvm.tcgen05.mma.tensor.tf32.scale_d.disable_output_lane.cg2.ashift(ptr addrspace(6) %d, ptr addrspace(6) %a, i64 %b, i32 %idesc, i1 %enable_inp_d, i64 %scale_d_imm, <8 x i32> %disable_output_lane_v8, i32 %collector_usage_a_op_flag)
+
+  ; .sp.scale_d variants
+  declare void @llvm.nvvm.tcgen05.mma.sp.shared.f16.scale_d.disable_output_lane.cg1(ptr addrspace(6) %d, i64 %a, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, i64 %scale_d_imm, <4 x i32> %disable_output_lane_v4, i32 %collector_usage_a_op_flag)
+  declare void @llvm.nvvm.tcgen05.mma.sp.shared.f16.scale_d.disable_output_lane.cg2(ptr addrspace(6) %d, i64 %a, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, i64 %scale_d_imm, <8 x i32> %disable_output_lane_v8, i32 %collector_usage_a_op_flag)
+  declare void @llvm.nvvm.tcgen05.mma.sp.shared.tf32.scale_d.disable_output_lane.cg1(ptr addrspace(6) %d, i64 %a, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, i64 %scale_d_imm, <4 x i32> %disable_output_lane_v4, i32 %collector_usage_a_op_flag)
+  declare void @llvm.nvvm.tcgen05.mma.sp.shared.tf32.scale_d.disable_output_lane.cg2(ptr addrspace(6) %d, i64 %a, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, i64 %scale_d_imm, <8 x i32> %disable_output_lane_v8, i32 %collector_usage_a_op_flag)
+  declare void @llvm.nvvm.tcgen05.mma.sp.tensor.f16.scale_d.disable_output_lane.cg1(ptr addrspace(6) %d, ptr addrspace(6) %a, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, i64 %scale_d_imm, <4 x i32> %disable_output_lane_v4, i32 %collector_usage_a_op_flag)
+  declare void @llvm.nvvm.tcgen05.mma.sp.tensor.f16.scale_d.disable_output_lane.cg1.ashift(ptr addrspace(6) %d, ptr addrspace(6) %a, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, i64 %scale_d_imm, <4 x i32> %disable_output_lane_v4, i32 %collector_usage_a_op_flag)
+  declare void @llvm.nvvm.tcgen05.mma.sp.tensor.f16.scale_d.disable_output_lane.cg2(ptr addrspace(6) %d, ptr addrspace(6) %a, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, i64 %scale_d_imm, <8 x i32> %disable_output_lane_v8, i32 %collector_usage_a_op_flag)
+  declare void @llvm.nvvm.tcgen05.mma.sp.tensor.f16.scale_d.disable_output_lane.cg2.ashift(ptr addrspace(6) %d, ptr addrspace(6) %a, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, i64 %scale_d_imm, <8 x i32> %disable_output_lane_v8, i32 %collector_usage_a_op_flag)
+  declare void @llvm.nvvm.tcgen05.mma.sp.tensor.tf32.scale_d.disable_output_lane.cg1(ptr addrspace(6) %d, ptr addrspace(6) %a, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, i64 %scale_d_imm, <4 x i32> %disable_output_lane_v4, i32 %collector_usage_a_op_flag)
+  declare void @llvm.nvvm.tcgen05.mma.sp.tensor.tf32.scale_d.disable_output_lane.cg1.ashift(ptr addrspace(6) %d, ptr addrspace(6) %a, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, i64 %scale_d_imm, <4 x i32> %disable_output_lane_v4, i32 %collector_usage_a_op_flag)
+  declare void @llvm.nvvm.tcgen05.mma.sp.tensor.tf32.scale_d.disable_output_lane.cg2(ptr addrspace(6) %d, ptr addrspace(6) %a, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, i64 %scale_d_imm, <8 x i32> %disable_output_lane_v8, i32 %collector_usage_a_op_flag)
+  declare void @llvm.nvvm.tcgen05.mma.sp.tensor.tf32.scale_d.disable_output_lane.cg2.ashift(ptr addrspace(6) %d, ptr addrspace(6) %a, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, i64 %scale_d_imm, <8 x i32> %disable_output_lane_v8, i32 %collector_usage_a_op_flag)
+
+Overview:
+"""""""""
+
+`nvvm.tcgen05.mma.disable_output_lane` is an asynchronous intrinsic which initiates an `MxNxK` matrix multiply and accumulate operation, `D = A * B + D`, where the `A` matrix is `M x K`, the `B` matrix is `K x N`, and the `D` matrix is `M x N`. The operation of the form `D = A * B` is issued when the input predicate argument `%enable_inp_d` is false. The immediate argument `%scale_d_imm`, present in the `.scale_d` variants, scales the input matrix `D` as follows: `D = A * B + D * (2 ^ - %scale_d_imm)`. The valid range of values for the argument `%scale_d_imm` is `[0, 15]`. The 32-bit register operand `%idesc` is the instruction descriptor as described in `Instruction descriptor `__
+
+The vector operand `%disable_output_lane` specifies the lane(s) in the Tensor Memory that should not be updated with the resultant matrix `D`. The elements of the vector operand `%disable_output_lane` form a mask, where each bit corresponds to a lane of the Tensor Memory, with the least significant bit of the first element of the vector corresponding to lane 0 of the Tensor Memory. If a bit in the mask is 1, the corresponding lane in the Tensor Memory for the resultant matrix `D` will not be updated.
+
+The `cta_group` is encoded in the intrinsic name (`.cg1` or `.cg2`) and selects whether `%disable_output_lane` is a `<4 x i32>` or an `<8 x i32>` vector.
+
+`nvvm.tcgen05.mma.disable_output_lane` has single-thread semantics, unlike the collective `nvvm.mma.sync` intrinsics or the PTX `wgmma.mma_async` instruction. So, a single thread issuing `nvvm.tcgen05.mma.disable_output_lane` initiates the whole matrix multiply and accumulate operation.
+
+When `.sp` is specified, the dimension of the `A` matrix is `M x (K/2)`, and an additional `%spmetadata` argument is required.
+
+`.ashift` shifts the rows of the `A` matrix down by one row, except for the last row in the Tensor Memory. `.ashift` is only allowed with M = 128 or M = 256.
+
+The `%collector_usage_a_op_flag` flag specifies the usage of the collector buffer for matrix `A`. It is illegal to specify either `USE` or `FILL` for `%collector_usage_a_op_flag` together with `.ashift`.
+
+For more information, refer to the
+`PTX ISA `__
+
+The following tables describe the possible values of the flag arguments:
+
+`%kind_flag` flag:
+
+============= ==========
+ `kind_flag`   value
+============= ==========
+ F16            0
+ TF32           1
+ F8F6F4         2
+ I8             3
+============= ==========
+
+`%collector_usage_a_op_flag` flag:
+
+============================= ==========
+ `collector_usage_a_op_flag`   value
+============================= ==========
+ DISCARD                        0
+ LASTUSE                        1
+ USE                            2
+ FILL                           3
+============================= ==========
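+
+As a hedged usage sketch (operands are placeholders), the mask below disables only lane 0 for a `cta_group::1` MMA:
+
+.. code-block:: llvm
+
+  ; bit 0 of the first mask element is set, so lane 0 of the Tensor Memory
+  ; is not updated; kind = F16 (0), collector_usage = DISCARD (0)
+  call void @llvm.nvvm.tcgen05.mma.tensor.disable_output_lane.cg1(ptr addrspace(6) %d, ptr addrspace(6) %a, i64 %bdesc, i32 %idesc, i1 true, <4 x i32> <i32 1, i32 0, i32 0, i32 0>, i32 0, i32 0)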
+'``llvm.nvvm.tcgen05.mma.ws*``'
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Syntax:
+"""""""
+
+.. code-block:: llvm
+
+  ; tcgen05.mma.ws
+  declare void @llvm.nvvm.tcgen05.mma.ws.shared(ptr addrspace(6) %d, i64 %a, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 %kind_flag, i32 %collector_usage_b_buffer_flag, i32 %collector_usage_b_op_flag)
+  declare void @llvm.nvvm.tcgen05.mma.ws.tensor(ptr addrspace(6) %d, ptr addrspace(6) %a, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 %kind_flag, i32 %collector_usage_b_buffer_flag, i32 %collector_usage_b_op_flag)
+  declare void @llvm.nvvm.tcgen05.mma.ws.shared.zero_col_mask(ptr addrspace(6) %d, i64 %a, i64 %b, i32 %idesc, i1 %enable_inp_d, i64 %zero_col_mask, i32 %kind_flag, i32 %collector_usage_b_buffer_flag, i32 %collector_usage_b_op_flag)
+  declare void @llvm.nvvm.tcgen05.mma.ws.tensor.zero_col_mask(ptr addrspace(6) %d, ptr addrspace(6) %a, i64 %b, i32 %idesc, i1 %enable_inp_d, i64 %zero_col_mask, i32 %kind_flag, i32 %collector_usage_b_buffer_flag, i32 %collector_usage_b_op_flag)
+
+  ; .sp variants
+  declare void @llvm.nvvm.tcgen05.mma.ws.sp.shared(ptr addrspace(6) %d, i64 %a, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, i32 %kind_flag, i32 %collector_usage_b_buffer_flag, i32 %collector_usage_b_op_flag)
+  declare void @llvm.nvvm.tcgen05.mma.ws.sp.tensor(ptr addrspace(6) %d, ptr addrspace(6) %a, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, i32 %kind_flag, i32 %collector_usage_b_buffer_flag, i32 %collector_usage_b_op_flag)
+  declare void @llvm.nvvm.tcgen05.mma.ws.sp.shared.zero_col_mask(ptr addrspace(6) %d, i64 %a, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, i64 %zero_col_mask, i32 %kind_flag, i32 %collector_usage_b_buffer_flag, i32 %collector_usage_b_op_flag)
+  declare void @llvm.nvvm.tcgen05.mma.ws.sp.tensor.zero_col_mask(ptr addrspace(6) %d, ptr addrspace(6) %a, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, i64 %zero_col_mask, i32 %kind_flag, i32 %collector_usage_b_buffer_flag, i32 %collector_usage_b_op_flag)
+
+Overview:
+"""""""""
+
+`nvvm.tcgen05.mma.ws` is an asynchronous intrinsic which initiates an `MxNxK` weight-stationary convolution matrix multiply and accumulate operation, `D = A * B + D`, where the `A` matrix is `M x K`, the `B` matrix is `K x N`, and the `D` matrix is `M x N`. The operation of the form `D = A * B` is issued when the input predicate argument `%enable_inp_d` is false. The 32-bit register operand `%idesc` is the instruction descriptor as described in `Instruction descriptor `__
+
+`nvvm.tcgen05.mma.ws` has single-thread semantics, unlike the collective `nvvm.mma.sync` intrinsics or the PTX `wgmma.mma_async` instruction. So, a single thread issuing `nvvm.tcgen05.mma.ws` initiates the whole matrix multiply and accumulate operation.
+
+When `.sp` is specified, the dimension of the `A` matrix is `M x (K/2)`, and an additional `%spmetadata` argument is required.
+
+The operand `%zero_col_mask` is a 64-bit register which specifies the `Zero-Column Mask Descriptor `__. The zero-column mask descriptor is used to generate a mask that specifies which columns of the `B` matrix will have zero value for the matrix multiply and accumulate operation, regardless of the values present in the shared memory.
+
+The `%collector_usage_b_buffer_flag` and `%collector_usage_b_op_flag` flags together specify the usage of the collector buffer for matrix `B`.
+
+For more information, refer to the
+`PTX ISA `__
+
+The following tables describe the possible values of the flag arguments:
+
+`%kind_flag` flag:
+
+============= ==========
+ `kind_flag`   value
+============= ==========
+ F16            0
+ TF32           1
+ F8F6F4         2
+ I8             3
+============= ==========
+
+`%collector_usage_b_buffer_flag` flag:
+
+================================ ==========
+ `collector_usage_b_buffer_flag`  value
+================================ ==========
+ B0                               0
+ B1                               1
+ B2                               2
+ B3                               3
+================================ ==========
+
+`%collector_usage_b_op_flag` flag:
+
+============================= ==========
+ `collector_usage_b_op_flag`   value
+============================= ==========
+ DISCARD                        0
+ LASTUSE                        1
+ USE                            2
+ FILL                           3
+============================= ==========
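+
+As a hedged usage sketch (operands are placeholders), a weight-stationary MMA reading `A` from the Tensor Memory:
+
+.. code-block:: llvm
+
+  ; kind = F16 (0), collector buffer = B0 (0), collector op = DISCARD (0)
+  call void @llvm.nvvm.tcgen05.mma.ws.tensor(ptr addrspace(6) %d, ptr addrspace(6) %a, i64 %bdesc, i32 %idesc, i1 true, i32 0, i32 0, i32 0)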
 
 Store Intrinsics
 ----------------
 
@@ -2012,7 +2394,7 @@ The completion of the write of each local response is
 tracked by independent mbarriers at the corresponding shared memory location of
 each CTA in the cluster.
 
-For more information, refer `PTX ISA `__.
+For more information, refer `PTX ISA `__.
 
 '``llvm.nvvm.clusterlaunchcontrol.query_cancel.is_canceled``' Intrinsic
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
@@ -2038,7 +2420,7 @@ it returns ``1`` (true). A true result indicates that:
 
 - no other successful response of another ``try_cancel`` request in the grid
   will contain the first CTA id of that cluster
 
-For more information, refer `PTX ISA `__.
+For more information, refer `PTX ISA `__.
 
 
 '``llvm.nvvm.clusterlaunchcontrol.query_cancel.get_first_ctaid.*``' Intrinsics
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
@@ -2067,7 +2449,7 @@ If the request succeeded:
 
 If the request failed, the behavior of these intrinsics is undefined.
 
-For more information, refer `PTX ISA `__.
+For more information, refer `PTX ISA `__.
 
 Perf Monitor Event Intrinsics
 -----------------------------
diff --git a/llvm/include/llvm/IR/IntrinsicsNVVM.td b/llvm/include/llvm/IR/IntrinsicsNVVM.td
index 967d1663f237b..65575d9b0f8ce 100644
--- a/llvm/include/llvm/IR/IntrinsicsNVVM.td
+++ b/llvm/include/llvm/IR/IntrinsicsNVVM.td
@@ -764,6 +764,46 @@ class NVVM_TCGEN05_LDST_ACCESS_SIZE<string Shape, int Num> {
                      true : llvm_void_ty);
 }
 
+class NVVM_TCGEN05_MMA_NAME<string Sp, string AOperand, string AShift,
+                            string ScaleInputD> {
+  string intr = "llvm.nvvm.tcgen05.mma"
+                # Sp # "." # AOperand
+                # ScaleInputD # AShift;
+  string record = !subst(".", "_", !subst("llvm.", "int_", intr));
+}
+
+class NVVM_TCGEN05_MMA_BLOCKSCALE_NAME<string Sp, string AOperand,
+                                       string Kind, string ScaleVecSize> {
+  string intr = "llvm.nvvm.tcgen05.mma"
+                # Sp # "." # AOperand
+                # "." # Kind # ScaleVecSize
+                # ".block_scale";
+  string record = !subst(".", "_", !subst("llvm.", "int_", intr));
+}
+
+class NVVM_TCGEN05_MMA_WS_NAME<string Sp, string AOp, string ZeroColMask> {
+  string intr = "llvm.nvvm.tcgen05.mma.ws"
+                # Sp # "." # AOp # ZeroColMask;
+  string record = !subst(".", "_", !subst("llvm.", "int_", intr));
+}
+
+class NVVM_TCGEN05_MMA_DISABLE_OUTPUT_LANE_NAME<string Sp, string AOp,
+                                                int CtaGroup, string AShift,
+                                                string ScaleInputD = ""> {
+  string intr = "llvm.nvvm.tcgen05.mma"
+                # Sp # "." # AOp # ScaleInputD
+                # ".disable_output_lane.cg" # CtaGroup # AShift;
+  string record = !subst(".", "_", !subst("llvm.", "int_", intr));
+}
+
+class NVVM_TCGEN05_MMA_BLOCKSCALE_SUPPORTED<string Kind, string ScaleVecSize> {
+  bit ret = !cond(
+    !and(!eq(Kind, "mxf8f6f4"), !eq(ScaleVecSize, "")) : true,
+    !and(!eq(Kind, "mxf4"), !eq(ScaleVecSize, "")) : true,
+    !and(!eq(Kind, "mxf4nvf4"), !eq(ScaleVecSize, ".block16")) : true,
+    !and(!eq(Kind, "mxf4"), !eq(ScaleVecSize, ".block32")) : true,
+    !and(!eq(Kind, "mxf4nvf4"), !eq(ScaleVecSize, ".block32")) : true,
+    !and(!eq(Kind, "mxf8f6f4"), !eq(ScaleVecSize, ".block32")) : true,
+    true: false
+  );
+}
+
 class TexVector<string name, list<LLVMType> types> {
   string Name = name;
   list<LLVMType> Types = types;
@@ -2464,4 +2504,392 @@ def int_nvvm_clusterlaunchcontrol_query_cancel_get_first_ctaid_ # dim
              "llvm.nvvm.clusterlaunchcontrol.query_cancel.get_first_ctaid." # dim>;
 }
 
-} // let TargetPrefix = "nvvm"
+//
+// tcgen05.mma Intrinsics
+//
+
+foreach space = ["tensor", "shared"] in {
+  foreach ashiftid = !if(!eq(space, "tensor"), [0, 1], [0]) in {
+    defvar a_operand_type = !if(!eq(space, "tensor"), llvm_tmem_ptr_ty,
+                                llvm_i64_ty);
+
+    defvar ashift = !if(!eq(ashiftid, 1), ".ashift", "");
+    defvar collector_usage_a_range = !if(!eq(ashiftid, 1), 2, 4);
+
+    def NVVM_TCGEN05_MMA_NAME<"", space, ashift, "">.record:
+      DefaultAttrsIntrinsic<[],
+        !listconcat([llvm_tmem_ptr_ty, // 0. dtmem
+                     a_operand_type,   // 1. a
+                     llvm_i64_ty,      // 2. b
+                     llvm_i32_ty,      // 3. idesc
+                     llvm_i1_ty],      // 4. enable_inp_d
+                    // flags
+                    [llvm_i32_ty,      // 5. kind
+                     llvm_i32_ty,      // 6. cta_group
+                     llvm_i32_ty]),    // 7. collector_usage_a
+        !listconcat([IntrArgMemOnly,
+                     WriteOnly<ArgIndex<0>>],
+                    !if(!eq(space, "tensor"), [ReadOnly<ArgIndex<1>>], []),
+                    [ImmArg<ArgIndex<5>>, Range<ArgIndex<5>, 0, 4>,
+                     ImmArg<ArgIndex<6>>, Range<ArgIndex<6>, 1, 3>,
+                     ImmArg<ArgIndex<7>>,
+                     Range<ArgIndex<7>, 0, collector_usage_a_range>]),
+        NVVM_TCGEN05_MMA_NAME<"", space, ashift, "">.intr>;
+
+    def NVVM_TCGEN05_MMA_NAME<".sp", space, ashift, "">.record:
+      DefaultAttrsIntrinsic<[],
+        !listconcat([llvm_tmem_ptr_ty,  // 0. dtmem
+                     a_operand_type,    // 1. a
+                     llvm_i64_ty,       // 2. b
+                     llvm_i32_ty,       // 3. idesc
+                     llvm_i1_ty,        // 4. enable_inp_d
+                     llvm_tmem_ptr_ty], // 5. spmetadata
+                    // flags
+                    [llvm_i32_ty,       // 6. kind
+                     llvm_i32_ty,       // 7. cta_group
+                     llvm_i32_ty]),     // 8. collector_usage_a
+        !listconcat([IntrArgMemOnly,
+                     WriteOnly<ArgIndex<0>>],
+                    !if(!eq(space, "tensor"), [ReadOnly<ArgIndex<1>>], []),
+                    [ImmArg<ArgIndex<6>>, Range<ArgIndex<6>, 0, 4>,
+                     ImmArg<ArgIndex<7>>, Range<ArgIndex<7>, 1, 3>,
+                     ImmArg<ArgIndex<8>>,
+                     Range<ArgIndex<8>, 0, collector_usage_a_range>]),
+        NVVM_TCGEN05_MMA_NAME<".sp", space, ashift, "">.intr>;
+
+    // scale_d
+    foreach kind = ["f16", "tf32"] in {
+      def NVVM_TCGEN05_MMA_NAME<"", space, ashift, "." # kind # ".scale_d">.record:
+        DefaultAttrsIntrinsic<[],
+          !listconcat([llvm_tmem_ptr_ty, // 0. dtmem
+                       a_operand_type,   // 1. a
+                       llvm_i64_ty,      // 2. b
+                       llvm_i32_ty,      // 3. idesc
+                       llvm_i1_ty,       // 4. enable_inp_d
+                       llvm_i64_ty],     // 5. scale_d_imm
+                      // flags
+                      [llvm_i32_ty,      // 6. cta_group
+                       llvm_i32_ty]),    // 7. collector_usage_a
+          !listconcat([IntrArgMemOnly,
+                       WriteOnly<ArgIndex<0>>],
+                      !if(!eq(space, "tensor"), [ReadOnly<ArgIndex<1>>], []),
+                      [ImmArg<ArgIndex<5>>, Range<ArgIndex<5>, 0, 16>,
+                       ImmArg<ArgIndex<6>>, Range<ArgIndex<6>, 1, 3>,
+                       ImmArg<ArgIndex<7>>,
+                       Range<ArgIndex<7>, 0, collector_usage_a_range>]),
+          NVVM_TCGEN05_MMA_NAME<"", space, ashift, "." # kind # ".scale_d">.intr>;
+
+      def NVVM_TCGEN05_MMA_NAME<".sp", space, ashift, "." # kind # ".scale_d">.record:
+        DefaultAttrsIntrinsic<[],
+          !listconcat([llvm_tmem_ptr_ty,  // 0. dtmem
+                       a_operand_type,    // 1. a
+                       llvm_i64_ty,       // 2. b
+                       llvm_i32_ty,       // 3. idesc
+                       llvm_i1_ty,        // 4. enable_inp_d
+                       llvm_tmem_ptr_ty,  // 5. spmetadata
+                       llvm_i64_ty],      // 6. scale_d_imm
+                      // flags
+                      [llvm_i32_ty,       // 7. cta_group
+                       llvm_i32_ty]),     // 8. collector_usage_a
+          !listconcat([IntrArgMemOnly,
+                       WriteOnly<ArgIndex<0>>],
+                      !if(!eq(space, "tensor"), [ReadOnly<ArgIndex<1>>], []),
+                      [ImmArg<ArgIndex<6>>, Range<ArgIndex<6>, 0, 16>,
+                       ImmArg<ArgIndex<7>>, Range<ArgIndex<7>, 1, 3>,
+                       ImmArg<ArgIndex<8>>,
+                       Range<ArgIndex<8>, 0, collector_usage_a_range>]),
+          NVVM_TCGEN05_MMA_NAME<".sp", space, ashift, "." # kind # ".scale_d">.intr>;
+    }
+  }
+}
+
+//
+// tcgen05.mma disable_output_lane intrinsics
+//
+foreach space = ["tensor", "shared"] in {
+  foreach ashiftid = !if(!eq(space, "tensor"), [0, 1], [0]) in {
+    defvar a_operand_type = !if(!eq(space, "tensor"),
+                                llvm_tmem_ptr_ty,
+                                llvm_i64_ty);
+    defvar ashift = !if(!eq(ashiftid, 1), ".ashift", "");
+    defvar collector_usage_a_range = !if(!eq(ashiftid, 1), 2, 4);
+
+    def NVVM_TCGEN05_MMA_DISABLE_OUTPUT_LANE_NAME<"", space, 1, ashift>.record:
+      DefaultAttrsIntrinsic<[],
+        !listconcat([llvm_tmem_ptr_ty, // 0. dtmem
+                     a_operand_type,   // 1. a
+                     llvm_i64_ty,      // 2. b
+                     llvm_i32_ty,      // 3. idesc
+                     llvm_i1_ty,       // 4. enable_inp_d
+                     llvm_v4i32_ty],   // 5. disable output lane
+                    // flags
+                    [llvm_i32_ty,      // 6. kind
+                     llvm_i32_ty]),    // 7. collector_usage_a
+        !listconcat([IntrArgMemOnly,
+                     WriteOnly<ArgIndex<0>>],
+                    !if(!eq(space, "tensor"), [ReadOnly<ArgIndex<1>>], []),
+                    [ImmArg<ArgIndex<6>>, Range<ArgIndex<6>, 0, 4>,
+                     ImmArg<ArgIndex<7>>,
+                     Range<ArgIndex<7>, 0, collector_usage_a_range>]),
+        NVVM_TCGEN05_MMA_DISABLE_OUTPUT_LANE_NAME<"", space, 1, ashift>.intr>;
+
+    def NVVM_TCGEN05_MMA_DISABLE_OUTPUT_LANE_NAME<"", space, 2, ashift>.record:
+      DefaultAttrsIntrinsic<[],
+        !listconcat([llvm_tmem_ptr_ty, // 0. dtmem
+                     a_operand_type,   // 1. a
+                     llvm_i64_ty,      // 2. b
+                     llvm_i32_ty,      // 3. idesc
+                     llvm_i1_ty,       // 4. enable_inp_d
+                     llvm_v8i32_ty],   // 5. disable output lane
+                    // flags
+                    [llvm_i32_ty,      // 6. kind
+                     llvm_i32_ty]),    // 7. collector_usage_a
+        !listconcat([IntrArgMemOnly,
+                     WriteOnly<ArgIndex<0>>],
+                    !if(!eq(space, "tensor"), [ReadOnly<ArgIndex<1>>], []),
+                    [ImmArg<ArgIndex<6>>, Range<ArgIndex<6>, 0, 4>,
+                     ImmArg<ArgIndex<7>>,
+                     Range<ArgIndex<7>, 0, collector_usage_a_range>]),
+        NVVM_TCGEN05_MMA_DISABLE_OUTPUT_LANE_NAME<"", space, 2, ashift>.intr>;
+
+    def NVVM_TCGEN05_MMA_DISABLE_OUTPUT_LANE_NAME<".sp", space, 1, ashift>.record:
+      DefaultAttrsIntrinsic<[],
+        !listconcat([llvm_tmem_ptr_ty,  // 0. dtmem
+                     a_operand_type,    // 1. a
+                     llvm_i64_ty,       // 2. b
+                     llvm_i32_ty,       // 3. idesc
+                     llvm_i1_ty,        // 4. enable_inp_d
+                     llvm_tmem_ptr_ty,  // 5. spmetadata
+                     llvm_v4i32_ty],    // 6. disable output lane
+                    // flags
+                    [llvm_i32_ty,       // 7. kind
+                     llvm_i32_ty]),     // 8. collector_usage_a
+        !listconcat([IntrArgMemOnly,
+                     WriteOnly<ArgIndex<0>>],
+                    !if(!eq(space, "tensor"), [ReadOnly<ArgIndex<1>>], []),
+                    [ImmArg<ArgIndex<7>>, Range<ArgIndex<7>, 0, 4>,
+                     ImmArg<ArgIndex<8>>,
+                     Range<ArgIndex<8>, 0, collector_usage_a_range>]),
+        NVVM_TCGEN05_MMA_DISABLE_OUTPUT_LANE_NAME<".sp", space, 1, ashift>.intr>;
+
+    def NVVM_TCGEN05_MMA_DISABLE_OUTPUT_LANE_NAME<".sp", space, 2, ashift>.record:
+      DefaultAttrsIntrinsic<[],
+        !listconcat([llvm_tmem_ptr_ty,  // 0. dtmem
+                     a_operand_type,    // 1. a
+                     llvm_i64_ty,       // 2. b
+                     llvm_i32_ty,       // 3. idesc
+                     llvm_i1_ty,        // 4. enable_inp_d
+                     llvm_tmem_ptr_ty,  // 5. spmetadata
+                     llvm_v8i32_ty],    // 6. disable output lane
+                    // flags
+                    [llvm_i32_ty,       // 7. kind
+                     llvm_i32_ty]),     // 8. collector_usage_a
+        !listconcat([IntrArgMemOnly,
+                     WriteOnly<ArgIndex<0>>],
+                    !if(!eq(space, "tensor"), [ReadOnly<ArgIndex<1>>], []),
+                    [ImmArg<ArgIndex<7>>, Range<ArgIndex<7>, 0, 4>,
+                     ImmArg<ArgIndex<8>>,
+                     Range<ArgIndex<8>, 0, collector_usage_a_range>]),
+        NVVM_TCGEN05_MMA_DISABLE_OUTPUT_LANE_NAME<".sp", space, 2, ashift>.intr>;
+
+    // scale-input-d
+    foreach kind = ["f16", "tf32"] in {
+      def NVVM_TCGEN05_MMA_DISABLE_OUTPUT_LANE_NAME<"", space, 1, ashift, "." # kind # ".scale_d">.record:
+        DefaultAttrsIntrinsic<[],
+          !listconcat([llvm_tmem_ptr_ty, // 0. dtmem
+                       a_operand_type,   // 1. a
+                       llvm_i64_ty,      // 2. b
+                       llvm_i32_ty,      // 3. idesc
+                       llvm_i1_ty,       // 4. enable_inp_d
+                       llvm_i64_ty,      // 5. scale_d_imm
+                       llvm_v4i32_ty],   // 6. disable output lane
+                      // flags
+                      [llvm_i32_ty]),    // 7. collector_usage_a
+          !listconcat([IntrArgMemOnly,
+                       WriteOnly<ArgIndex<0>>],
+                      !if(!eq(space, "tensor"), [ReadOnly<ArgIndex<1>>], []),
+                      [ImmArg<ArgIndex<5>>, Range<ArgIndex<5>, 0, 16>,
+                       ImmArg<ArgIndex<7>>,
+                       Range<ArgIndex<7>, 0, collector_usage_a_range>]),
+          NVVM_TCGEN05_MMA_DISABLE_OUTPUT_LANE_NAME<"", space, 1, ashift, "." # kind # ".scale_d">.intr>;
+
+      def NVVM_TCGEN05_MMA_DISABLE_OUTPUT_LANE_NAME<"", space, 2, ashift, "." # kind # ".scale_d">.record:
+        DefaultAttrsIntrinsic<[],
+          !listconcat([llvm_tmem_ptr_ty, // 0. dtmem
+                       a_operand_type,   // 1. a
+                       llvm_i64_ty,      // 2. b
+                       llvm_i32_ty,      // 3. idesc
+                       llvm_i1_ty,       // 4. enable_inp_d
+                       llvm_i64_ty,      // 5. scale_d_imm
+                       llvm_v8i32_ty],   // 6. disable output lane
+                      // flags
+                      [llvm_i32_ty]),    // 7. collector_usage_a
+          !listconcat([IntrArgMemOnly,
+                       WriteOnly<ArgIndex<0>>],
+                      !if(!eq(space, "tensor"), [ReadOnly<ArgIndex<1>>], []),
+                      [ImmArg<ArgIndex<5>>, Range<ArgIndex<5>, 0, 16>,
+                       ImmArg<ArgIndex<7>>,
+                       Range<ArgIndex<7>, 0, collector_usage_a_range>]),
+          NVVM_TCGEN05_MMA_DISABLE_OUTPUT_LANE_NAME<"", space, 2, ashift, "." # kind # ".scale_d">.intr>;
+
+      def NVVM_TCGEN05_MMA_DISABLE_OUTPUT_LANE_NAME<".sp", space, 1, ashift, "." # kind # ".scale_d">.record:
+        DefaultAttrsIntrinsic<[],
+          !listconcat([llvm_tmem_ptr_ty,  // 0. dtmem
+                       a_operand_type,    // 1. a
+                       llvm_i64_ty,       // 2. b
+                       llvm_i32_ty,       // 3. idesc
+                       llvm_i1_ty,        // 4. enable_inp_d
+                       llvm_tmem_ptr_ty,  // 5. spmetadata
+                       llvm_i64_ty,       // 6. scale_d_imm
+                       llvm_v4i32_ty],    // 7. disable output lane
+                      // flags
+                      [llvm_i32_ty]),     // 8. collector_usage_a
+          !listconcat([IntrArgMemOnly,
+                       WriteOnly<ArgIndex<0>>],
+                      !if(!eq(space, "tensor"), [ReadOnly<ArgIndex<1>>], []),
+                      [ImmArg<ArgIndex<6>>, Range<ArgIndex<6>, 0, 16>,
+                       ImmArg<ArgIndex<8>>,
+                       Range<ArgIndex<8>, 0, collector_usage_a_range>]),
+          NVVM_TCGEN05_MMA_DISABLE_OUTPUT_LANE_NAME<".sp", space, 1, ashift, "." # kind # ".scale_d">.intr>;
+
+      def NVVM_TCGEN05_MMA_DISABLE_OUTPUT_LANE_NAME<".sp", space, 2, ashift, "." # kind # ".scale_d">.record:
+        DefaultAttrsIntrinsic<[],
+          !listconcat([llvm_tmem_ptr_ty,  // 0. dtmem
+                       a_operand_type,    // 1. a
+                       llvm_i64_ty,       // 2. b
+                       llvm_i32_ty,       // 3. idesc
+                       llvm_i1_ty,        // 4. enable_inp_d
+                       llvm_tmem_ptr_ty,  // 5. spmetadata
+                       llvm_i64_ty,       // 6. scale_d_imm
+                       llvm_v8i32_ty],    // 7. disable output lane
+                      // flags
+                      [llvm_i32_ty]),     // 8. collector_usage_a
+          !listconcat([IntrArgMemOnly,
+                       WriteOnly<ArgIndex<0>>],
+                      !if(!eq(space, "tensor"), [ReadOnly<ArgIndex<1>>], []),
+                      [ImmArg<ArgIndex<6>>, Range<ArgIndex<6>, 0, 16>,
+                       ImmArg<ArgIndex<8>>,
+                       Range<ArgIndex<8>, 0, collector_usage_a_range>]),
+          NVVM_TCGEN05_MMA_DISABLE_OUTPUT_LANE_NAME<".sp", space, 2, ashift, "." # kind # ".scale_d">.intr>;
+    }
+  }
+}
+
+//
+// tcgen05.mma block_scale intrinsics
+//
+foreach sp = ["", ".sp"] in {
+  foreach space = ["tensor", "shared"] in {
+    foreach kind = ["mxf8f6f4", "mxf4", "mxf4nvf4"] in {
+      foreach scale_vec_size = ["", ".block16", ".block32"] in {
+        defvar a_operand_type = !if(!eq(space, "tensor"), llvm_tmem_ptr_ty,
+                                    llvm_i64_ty);
+        defvar CtaGroupIdx = !if(!eq(sp, ".sp"), 8, 7);
+        defvar CollectorUsageIdx = !if(!eq(sp, ".sp"), 9, 8);
+
+        if NVVM_TCGEN05_MMA_BLOCKSCALE_SUPPORTED<kind, scale_vec_size>.ret then {
+          def NVVM_TCGEN05_MMA_BLOCKSCALE_NAME<sp, space, kind, scale_vec_size>.record:
+            DefaultAttrsIntrinsic<[],
+              !listconcat([llvm_tmem_ptr_ty,          // dtmem
+                           a_operand_type,            // a
+                           llvm_i64_ty,               // b
+                           llvm_i32_ty,               // idesc
+                           llvm_i1_ty],               // enable_inp_d
+                          !if(!eq(sp, ".sp"),
+                              [llvm_tmem_ptr_ty], []), // spmetadata
+                          [llvm_tmem_ptr_ty,          // scale a
+                           llvm_tmem_ptr_ty],         // scale b
+                          // flags
+                          [llvm_i32_ty,               // cta_group
+                           llvm_i32_ty]               // collector_usage_a
+                         ),
+              !listconcat([IntrArgMemOnly,
+                           WriteOnly<ArgIndex<0>>],
+                          !if(!eq(space, "tensor"), [ReadOnly<ArgIndex<1>>], []),
+                          [ImmArg<ArgIndex<CtaGroupIdx>>,
+                           Range<ArgIndex<CtaGroupIdx>, 1, 3>,
+                           ImmArg<ArgIndex<CollectorUsageIdx>>,
+                           Range<ArgIndex<CollectorUsageIdx>, 0, 4>]),
+              NVVM_TCGEN05_MMA_BLOCKSCALE_NAME<sp, space, kind, scale_vec_size>.intr>;
+        }
+      }
+    }
+  }
+}
+
+//
+// tcgen05.mma ws intrinsics
+//
+foreach space = ["tensor", "shared"] in {
+  defvar a_operand_type = !if(!eq(space, "tensor"), llvm_tmem_ptr_ty,
+                              llvm_i64_ty);
+
+  def NVVM_TCGEN05_MMA_WS_NAME<"", space, "">.record:
+    DefaultAttrsIntrinsic<[],
+      !listconcat([llvm_tmem_ptr_ty, // 0. dtmem
+                   a_operand_type,   // 1. a
+                   llvm_i64_ty,      // 2. b
+                   llvm_i32_ty,      // 3. idesc
+                   llvm_i1_ty],      // 4. enable_inp_d
+                  // flags
+                  [llvm_i32_ty,      // 5. kind
+                   llvm_i32_ty,      // 6. collector_buffer_b
+                   llvm_i32_ty]),    // 7. collector_usage_b_op
+      !listconcat([IntrArgMemOnly,
+                   WriteOnly<ArgIndex<0>>],
+                  !if(!eq(space, "tensor"), [ReadOnly<ArgIndex<1>>], []),
+                  [ImmArg<ArgIndex<5>>, Range<ArgIndex<5>, 0, 4>,
+                   ImmArg<ArgIndex<6>>, Range<ArgIndex<6>, 0, 4>,
+                   ImmArg<ArgIndex<7>>, Range<ArgIndex<7>, 0, 4>]),
+      NVVM_TCGEN05_MMA_WS_NAME<"", space, "">.intr>;
+
+  def NVVM_TCGEN05_MMA_WS_NAME<".sp", space, "">.record:
+    DefaultAttrsIntrinsic<[],
+      !listconcat([llvm_tmem_ptr_ty,  // 0. dtmem
+                   a_operand_type,    // 1. a
+                   llvm_i64_ty,       // 2. b
+                   llvm_i32_ty,       // 3. idesc
+                   llvm_i1_ty,        // 4. enable_inp_d
+                   llvm_tmem_ptr_ty], // 5. spmetadata
+                  // flags
+                  [llvm_i32_ty,       // 6. kind
+                   llvm_i32_ty,       // 7. collector_buffer_b
+                   llvm_i32_ty]),     // 8. collector_usage_b_op
+      !listconcat([IntrArgMemOnly,
+                   WriteOnly<ArgIndex<0>>],
+                  !if(!eq(space, "tensor"), [ReadOnly<ArgIndex<1>>], []),
+                  [ImmArg<ArgIndex<6>>, Range<ArgIndex<6>, 0, 4>,
+                   ImmArg<ArgIndex<7>>, Range<ArgIndex<7>, 0, 4>,
+                   ImmArg<ArgIndex<8>>, Range<ArgIndex<8>, 0, 4>]),
+      NVVM_TCGEN05_MMA_WS_NAME<".sp", space, "">.intr>;
+
+  def NVVM_TCGEN05_MMA_WS_NAME<"", space, ".zero_col_mask">.record:
+    DefaultAttrsIntrinsic<[],
+      !listconcat([llvm_tmem_ptr_ty, // 0. dtmem
+                   a_operand_type,   // 1. a
+                   llvm_i64_ty,      // 2. b
+                   llvm_i32_ty,      // 3. idesc
+                   llvm_i1_ty,       // 4. enable_inp_d
+                   llvm_i64_ty],     // 5. zero_col_mask
+                  // flags
+                  [llvm_i32_ty,      // 6. kind
+                   llvm_i32_ty,      // 7. collector_buffer_b
+                   llvm_i32_ty]),    // 8. collector_usage_b_op
+      !listconcat([IntrArgMemOnly,
+                   WriteOnly<ArgIndex<0>>],
+                  !if(!eq(space, "tensor"), [ReadOnly<ArgIndex<1>>], []),
+                  [ImmArg<ArgIndex<6>>, Range<ArgIndex<6>, 0, 4>,
+                   ImmArg<ArgIndex<7>>, Range<ArgIndex<7>, 0, 4>,
+                   ImmArg<ArgIndex<8>>, Range<ArgIndex<8>, 0, 4>]),
+      NVVM_TCGEN05_MMA_WS_NAME<"", space, ".zero_col_mask">.intr>;
+  def NVVM_TCGEN05_MMA_WS_NAME<".sp", space, ".zero_col_mask">.record:
+    DefaultAttrsIntrinsic<[],
+      !listconcat([llvm_tmem_ptr_ty,  // 0. dtmem
+                   a_operand_type,    // 1. a
+                   llvm_i64_ty,       // 2. b
+                   llvm_i32_ty,       // 3. idesc
+                   llvm_i1_ty,        // 4. enable_inp_d
+                   llvm_tmem_ptr_ty,  // 5. spmetadata
+                   llvm_i64_ty],      // 6. zero_col_mask
+                  // flags
+                  [llvm_i32_ty,       // 7. kind
+                   llvm_i32_ty,       // 8. collector_buffer_b
+                   llvm_i32_ty]),     // 9. collector_usage_b_op
+      !listconcat([IntrArgMemOnly,
+                   WriteOnly<ArgIndex<0>>],
+                  !if(!eq(space, "tensor"), [ReadOnly<ArgIndex<1>>], []),
+                  [ImmArg<ArgIndex<7>>, Range<ArgIndex<7>, 0, 4>,
+                   ImmArg<ArgIndex<8>>, Range<ArgIndex<8>, 0, 4>,
+                   ImmArg<ArgIndex<9>>, Range<ArgIndex<9>, 0, 4>]),
+      NVVM_TCGEN05_MMA_WS_NAME<".sp", space, ".zero_col_mask">.intr>;
+}
+
+} // let TargetPrefix = "nvvm"
\ No newline at end of file
diff --git a/llvm/include/llvm/IR/NVVMIntrinsicUtils.h b/llvm/include/llvm/IR/NVVMIntrinsicUtils.h
index 11bfd733a8854..f18e55c744124 100644
--- a/llvm/include/llvm/IR/NVVMIntrinsicUtils.h
+++ b/llvm/include/llvm/IR/NVVMIntrinsicUtils.h
@@ -47,6 +47,15 @@ enum class CTAGroupKind : uint8_t {
   CG_2 = 2, // cta_group::2 modifier
 };
 
+enum class Tcgen05MMAKind : uint8_t { F16 = 0, TF32 = 1, F8F6F4 = 2, I8 = 3 };
+
+enum class Tcgen05CollectorUsageOp : uint8_t {
+  DISCARD = 0,
+  LASTUSE = 1,
+  USE = 2,
+  FILL = 3,
+};
+
 inline bool FPToIntegerIntrinsicShouldFTZ(Intrinsic::ID IntrinsicID) {
   switch (IntrinsicID) {
   case Intrinsic::nvvm_f2i_rm_ftz:
diff --git a/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp b/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp
index 15f45a1f35e2f..b9d6623c811f8 100644
--- a/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp
+++ b/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp
@@ -1157,6 +1157,64 @@ const char *NVPTXTargetLowering::getTargetNodeName(unsigned Opcode) const {
     MAKE_CASE(NVPTXISD::CLUSTERLAUNCHCONTROL_QUERY_CANCEL_GET_FIRST_CTAID_X)
     MAKE_CASE(NVPTXISD::CLUSTERLAUNCHCONTROL_QUERY_CANCEL_GET_FIRST_CTAID_Y)
     MAKE_CASE(NVPTXISD::CLUSTERLAUNCHCONTROL_QUERY_CANCEL_GET_FIRST_CTAID_Z)
+    MAKE_CASE(NVPTXISD::TCGEN05_MMA_SHARED_DISABLE_OUTPUT_LANE_CG1)
+    MAKE_CASE(NVPTXISD::TCGEN05_MMA_SHARED_DISABLE_OUTPUT_LANE_CG2)
+    MAKE_CASE(NVPTXISD::TCGEN05_MMA_SHARED_F16_SCALE_D_DISABLE_OUTPUT_LANE_CG1)
+    MAKE_CASE(NVPTXISD::TCGEN05_MMA_SHARED_F16_SCALE_D_DISABLE_OUTPUT_LANE_CG2)
+    MAKE_CASE(NVPTXISD::TCGEN05_MMA_SHARED_TF32_SCALE_D_DISABLE_OUTPUT_LANE_CG1)
+    MAKE_CASE(NVPTXISD::TCGEN05_MMA_SHARED_TF32_SCALE_D_DISABLE_OUTPUT_LANE_CG2)
+    MAKE_CASE(NVPTXISD::TCGEN05_MMA_TENSOR_DISABLE_OUTPUT_LANE_CG1)
+    MAKE_CASE(NVPTXISD::TCGEN05_MMA_TENSOR_DISABLE_OUTPUT_LANE_CG2)
+    MAKE_CASE(NVPTXISD::TCGEN05_MMA_TENSOR_F16_SCALE_D_DISABLE_OUTPUT_LANE_CG1)
+    MAKE_CASE(NVPTXISD::TCGEN05_MMA_TENSOR_F16_SCALE_D_DISABLE_OUTPUT_LANE_CG2)
+    MAKE_CASE(NVPTXISD::TCGEN05_MMA_TENSOR_TF32_SCALE_D_DISABLE_OUTPUT_LANE_CG1)
+    MAKE_CASE(NVPTXISD::TCGEN05_MMA_TENSOR_TF32_SCALE_D_DISABLE_OUTPUT_LANE_CG2)
+    MAKE_CASE(NVPTXISD::TCGEN05_MMA_TENSOR_DISABLE_OUTPUT_LANE_CG1_ASHIFT)
+    MAKE_CASE(NVPTXISD::TCGEN05_MMA_TENSOR_DISABLE_OUTPUT_LANE_CG2_ASHIFT)
+    MAKE_CASE(
+        NVPTXISD::TCGEN05_MMA_TENSOR_F16_SCALE_D_DISABLE_OUTPUT_LANE_CG1_ASHIFT)
+    MAKE_CASE(
+        NVPTXISD::TCGEN05_MMA_TENSOR_F16_SCALE_D_DISABLE_OUTPUT_LANE_CG2_ASHIFT)
+    MAKE_CASE(
+        NVPTXISD::
+            TCGEN05_MMA_TENSOR_TF32_SCALE_D_DISABLE_OUTPUT_LANE_CG1_ASHIFT)
+    MAKE_CASE(
+        NVPTXISD::
+            TCGEN05_MMA_TENSOR_TF32_SCALE_D_DISABLE_OUTPUT_LANE_CG2_ASHIFT)
+    MAKE_CASE(NVPTXISD::TCGEN05_MMA_SP_SHARED_DISABLE_OUTPUT_LANE_CG1)
+    MAKE_CASE(NVPTXISD::TCGEN05_MMA_SP_SHARED_DISABLE_OUTPUT_LANE_CG2)
+    MAKE_CASE(
+        NVPTXISD::TCGEN05_MMA_SP_SHARED_F16_SCALE_D_DISABLE_OUTPUT_LANE_CG1)
+    MAKE_CASE(
+        NVPTXISD::TCGEN05_MMA_SP_SHARED_F16_SCALE_D_DISABLE_OUTPUT_LANE_CG2)
+    MAKE_CASE(
+        NVPTXISD::TCGEN05_MMA_SP_SHARED_TF32_SCALE_D_DISABLE_OUTPUT_LANE_CG1)
+    MAKE_CASE(
+        NVPTXISD::TCGEN05_MMA_SP_SHARED_TF32_SCALE_D_DISABLE_OUTPUT_LANE_CG2)
+    MAKE_CASE(NVPTXISD::TCGEN05_MMA_SP_TENSOR_DISABLE_OUTPUT_LANE_CG1)
MAKE_CASE(NVPTXISD::TCGEN05_MMA_SP_TENSOR_DISABLE_OUTPUT_LANE_CG2) + MAKE_CASE(NVPTXISD::TCGEN05_MMA_SP_TENSOR_DISABLE_OUTPUT_LANE_CG1_ASHIFT) + MAKE_CASE(NVPTXISD::TCGEN05_MMA_SP_TENSOR_DISABLE_OUTPUT_LANE_CG2_ASHIFT) + MAKE_CASE( + NVPTXISD::TCGEN05_MMA_SP_TENSOR_F16_SCALE_D_DISABLE_OUTPUT_LANE_CG1) + MAKE_CASE( + NVPTXISD::TCGEN05_MMA_SP_TENSOR_F16_SCALE_D_DISABLE_OUTPUT_LANE_CG2) + MAKE_CASE( + NVPTXISD::TCGEN05_MMA_SP_TENSOR_TF32_SCALE_D_DISABLE_OUTPUT_LANE_CG1) + MAKE_CASE( + NVPTXISD::TCGEN05_MMA_SP_TENSOR_TF32_SCALE_D_DISABLE_OUTPUT_LANE_CG2) + MAKE_CASE( + NVPTXISD:: + TCGEN05_MMA_SP_TENSOR_F16_SCALE_D_DISABLE_OUTPUT_LANE_CG1_ASHIFT) + MAKE_CASE( + NVPTXISD:: + TCGEN05_MMA_SP_TENSOR_F16_SCALE_D_DISABLE_OUTPUT_LANE_CG2_ASHIFT) + MAKE_CASE( + NVPTXISD:: + TCGEN05_MMA_SP_TENSOR_TF32_SCALE_D_DISABLE_OUTPUT_LANE_CG1_ASHIFT) + MAKE_CASE( + NVPTXISD:: + TCGEN05_MMA_SP_TENSOR_TF32_SCALE_D_DISABLE_OUTPUT_LANE_CG2_ASHIFT) } return nullptr; @@ -2507,6 +2565,137 @@ static SDValue LowerTcgen05St(SDValue Op, SelectionDAG &DAG) { return Tcgen05StNode; } +static unsigned getTcgen05MMADisableOutputLane(unsigned IID) { + switch (IID) { + case Intrinsic::nvvm_tcgen05_mma_shared_disable_output_lane_cg1: + return NVPTXISD::TCGEN05_MMA_SHARED_DISABLE_OUTPUT_LANE_CG1; + case Intrinsic::nvvm_tcgen05_mma_shared_disable_output_lane_cg2: + return NVPTXISD::TCGEN05_MMA_SHARED_DISABLE_OUTPUT_LANE_CG2; + case Intrinsic::nvvm_tcgen05_mma_shared_f16_scale_d_disable_output_lane_cg1: + return NVPTXISD::TCGEN05_MMA_SHARED_F16_SCALE_D_DISABLE_OUTPUT_LANE_CG1; + case Intrinsic::nvvm_tcgen05_mma_shared_f16_scale_d_disable_output_lane_cg2: + return NVPTXISD::TCGEN05_MMA_SHARED_F16_SCALE_D_DISABLE_OUTPUT_LANE_CG2; + case Intrinsic::nvvm_tcgen05_mma_shared_tf32_scale_d_disable_output_lane_cg1: + return NVPTXISD::TCGEN05_MMA_SHARED_TF32_SCALE_D_DISABLE_OUTPUT_LANE_CG1; + case Intrinsic::nvvm_tcgen05_mma_shared_tf32_scale_d_disable_output_lane_cg2: + return NVPTXISD::TCGEN05_MMA_SHARED_TF32_SCALE_D_DISABLE_OUTPUT_LANE_CG2; + case Intrinsic::nvvm_tcgen05_mma_tensor_disable_output_lane_cg1: + return NVPTXISD::TCGEN05_MMA_TENSOR_DISABLE_OUTPUT_LANE_CG1; + case Intrinsic::nvvm_tcgen05_mma_tensor_disable_output_lane_cg2: + return NVPTXISD::TCGEN05_MMA_TENSOR_DISABLE_OUTPUT_LANE_CG2; + case Intrinsic::nvvm_tcgen05_mma_tensor_f16_scale_d_disable_output_lane_cg1: + return NVPTXISD::TCGEN05_MMA_TENSOR_F16_SCALE_D_DISABLE_OUTPUT_LANE_CG1; + case Intrinsic::nvvm_tcgen05_mma_tensor_f16_scale_d_disable_output_lane_cg2: + return NVPTXISD::TCGEN05_MMA_TENSOR_F16_SCALE_D_DISABLE_OUTPUT_LANE_CG2; + case Intrinsic::nvvm_tcgen05_mma_tensor_tf32_scale_d_disable_output_lane_cg1: + return NVPTXISD::TCGEN05_MMA_TENSOR_TF32_SCALE_D_DISABLE_OUTPUT_LANE_CG1; + case Intrinsic::nvvm_tcgen05_mma_tensor_tf32_scale_d_disable_output_lane_cg2: + return NVPTXISD::TCGEN05_MMA_TENSOR_TF32_SCALE_D_DISABLE_OUTPUT_LANE_CG2; + case Intrinsic::nvvm_tcgen05_mma_tensor_disable_output_lane_cg1_ashift: + return NVPTXISD::TCGEN05_MMA_TENSOR_DISABLE_OUTPUT_LANE_CG1_ASHIFT; + case Intrinsic::nvvm_tcgen05_mma_tensor_disable_output_lane_cg2_ashift: + return NVPTXISD::TCGEN05_MMA_TENSOR_DISABLE_OUTPUT_LANE_CG2_ASHIFT; + case Intrinsic:: + nvvm_tcgen05_mma_tensor_f16_scale_d_disable_output_lane_cg1_ashift: + return NVPTXISD:: + TCGEN05_MMA_TENSOR_F16_SCALE_D_DISABLE_OUTPUT_LANE_CG1_ASHIFT; + case Intrinsic:: + nvvm_tcgen05_mma_tensor_f16_scale_d_disable_output_lane_cg2_ashift: + return NVPTXISD:: + TCGEN05_MMA_TENSOR_F16_SCALE_D_DISABLE_OUTPUT_LANE_CG2_ASHIFT; + case 
Intrinsic::
+      nvvm_tcgen05_mma_tensor_tf32_scale_d_disable_output_lane_cg1_ashift:
+    return NVPTXISD::
+        TCGEN05_MMA_TENSOR_TF32_SCALE_D_DISABLE_OUTPUT_LANE_CG1_ASHIFT;
+  case Intrinsic::
+      nvvm_tcgen05_mma_tensor_tf32_scale_d_disable_output_lane_cg2_ashift:
+    return NVPTXISD::
+        TCGEN05_MMA_TENSOR_TF32_SCALE_D_DISABLE_OUTPUT_LANE_CG2_ASHIFT;
+  case Intrinsic::nvvm_tcgen05_mma_sp_shared_disable_output_lane_cg1:
+    return NVPTXISD::TCGEN05_MMA_SP_SHARED_DISABLE_OUTPUT_LANE_CG1;
+  case Intrinsic::nvvm_tcgen05_mma_sp_shared_disable_output_lane_cg2:
+    return NVPTXISD::TCGEN05_MMA_SP_SHARED_DISABLE_OUTPUT_LANE_CG2;
+  case Intrinsic::
+      nvvm_tcgen05_mma_sp_shared_f16_scale_d_disable_output_lane_cg1:
+    return NVPTXISD::TCGEN05_MMA_SP_SHARED_F16_SCALE_D_DISABLE_OUTPUT_LANE_CG1;
+  case Intrinsic::
+      nvvm_tcgen05_mma_sp_shared_f16_scale_d_disable_output_lane_cg2:
+    return NVPTXISD::TCGEN05_MMA_SP_SHARED_F16_SCALE_D_DISABLE_OUTPUT_LANE_CG2;
+  case Intrinsic::
+      nvvm_tcgen05_mma_sp_shared_tf32_scale_d_disable_output_lane_cg1:
+    return NVPTXISD::TCGEN05_MMA_SP_SHARED_TF32_SCALE_D_DISABLE_OUTPUT_LANE_CG1;
+  case Intrinsic::
+      nvvm_tcgen05_mma_sp_shared_tf32_scale_d_disable_output_lane_cg2:
+    return NVPTXISD::TCGEN05_MMA_SP_SHARED_TF32_SCALE_D_DISABLE_OUTPUT_LANE_CG2;
+  case Intrinsic::nvvm_tcgen05_mma_sp_tensor_disable_output_lane_cg1:
+    return NVPTXISD::TCGEN05_MMA_SP_TENSOR_DISABLE_OUTPUT_LANE_CG1;
+  case Intrinsic::nvvm_tcgen05_mma_sp_tensor_disable_output_lane_cg2:
+    return NVPTXISD::TCGEN05_MMA_SP_TENSOR_DISABLE_OUTPUT_LANE_CG2;
+  case Intrinsic::nvvm_tcgen05_mma_sp_tensor_disable_output_lane_cg1_ashift:
+    return NVPTXISD::TCGEN05_MMA_SP_TENSOR_DISABLE_OUTPUT_LANE_CG1_ASHIFT;
+  case Intrinsic::nvvm_tcgen05_mma_sp_tensor_disable_output_lane_cg2_ashift:
+    return NVPTXISD::TCGEN05_MMA_SP_TENSOR_DISABLE_OUTPUT_LANE_CG2_ASHIFT;
+  case Intrinsic::
+      nvvm_tcgen05_mma_sp_tensor_f16_scale_d_disable_output_lane_cg1:
+    return NVPTXISD::TCGEN05_MMA_SP_TENSOR_F16_SCALE_D_DISABLE_OUTPUT_LANE_CG1;
+  case Intrinsic::
+      nvvm_tcgen05_mma_sp_tensor_f16_scale_d_disable_output_lane_cg2:
+    return NVPTXISD::TCGEN05_MMA_SP_TENSOR_F16_SCALE_D_DISABLE_OUTPUT_LANE_CG2;
+  case Intrinsic::
+      nvvm_tcgen05_mma_sp_tensor_tf32_scale_d_disable_output_lane_cg1:
+    return NVPTXISD::TCGEN05_MMA_SP_TENSOR_TF32_SCALE_D_DISABLE_OUTPUT_LANE_CG1;
+  case Intrinsic::
+      nvvm_tcgen05_mma_sp_tensor_tf32_scale_d_disable_output_lane_cg2:
+    return NVPTXISD::TCGEN05_MMA_SP_TENSOR_TF32_SCALE_D_DISABLE_OUTPUT_LANE_CG2;
+  case Intrinsic::
+      nvvm_tcgen05_mma_sp_tensor_f16_scale_d_disable_output_lane_cg1_ashift:
+    return NVPTXISD::
+        TCGEN05_MMA_SP_TENSOR_F16_SCALE_D_DISABLE_OUTPUT_LANE_CG1_ASHIFT;
+  case Intrinsic::
+      nvvm_tcgen05_mma_sp_tensor_f16_scale_d_disable_output_lane_cg2_ashift:
+    return NVPTXISD::
+        TCGEN05_MMA_SP_TENSOR_F16_SCALE_D_DISABLE_OUTPUT_LANE_CG2_ASHIFT;
+  case Intrinsic::
+      nvvm_tcgen05_mma_sp_tensor_tf32_scale_d_disable_output_lane_cg1_ashift:
+    return NVPTXISD::
+        TCGEN05_MMA_SP_TENSOR_TF32_SCALE_D_DISABLE_OUTPUT_LANE_CG1_ASHIFT;
+  case Intrinsic::
+      nvvm_tcgen05_mma_sp_tensor_tf32_scale_d_disable_output_lane_cg2_ashift:
+    return NVPTXISD::
+        TCGEN05_MMA_SP_TENSOR_TF32_SCALE_D_DISABLE_OUTPUT_LANE_CG2_ASHIFT;
+  };
+  llvm_unreachable("unhandled tcgen05.mma.disable_output_lane intrinsic");
+}
+
+static SDValue LowerTcgen05MMADisableOutputLane(SDValue Op, SelectionDAG &DAG) {
+  SDNode *N = Op.getNode();
+  SDLoc DL(N);
+  unsigned IID = cast<ConstantSDNode>(N->getOperand(1))->getZExtValue();
+
+  SmallVector<SDValue> Ops;
+  // split the vector argument
+  for
(size_t I = 0; I < N->getNumOperands(); I++) {
+    if (I == 1)
+      continue; // skip IID
+    SDValue Val = N->getOperand(I);
+    EVT ValVT = Val.getValueType();
+    if (ValVT.isVector()) {
+      EVT EltVT = ValVT.getVectorElementType();
+      for (unsigned J = 0, NElts = ValVT.getVectorNumElements(); J < NElts; J++)
+        Ops.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, EltVT, Val,
+                                  DAG.getIntPtrConstant(J, DL)));
+    } else
+      Ops.push_back(Val);
+  }
+
+  MemIntrinsicSDNode *MemSD = cast<MemIntrinsicSDNode>(N);
+  SDValue Tcgen05MMANode = DAG.getMemIntrinsicNode(
+      getTcgen05MMADisableOutputLane(IID), DL, N->getVTList(), Ops,
+      MemSD->getMemoryVT(), MemSD->getMemOperand());
+
+  return Tcgen05MMANode;
+}
+
 static SDValue LowerIntrinsicVoid(SDValue Op, SelectionDAG &DAG) {
   SDNode *N = Op.getNode();
   SDValue Intrin = N->getOperand(1);
@@ -2554,6 +2743,59 @@ static SDValue LowerIntrinsicVoid(SDValue Op, SelectionDAG &DAG) {
   case Intrinsic::nvvm_tcgen05_st_32x32b_x64:
   case Intrinsic::nvvm_tcgen05_st_32x32b_x128:
     return LowerTcgen05St(Op, DAG);
+  case Intrinsic::nvvm_tcgen05_mma_shared_disable_output_lane_cg1:
+  case Intrinsic::nvvm_tcgen05_mma_shared_disable_output_lane_cg2:
+  case Intrinsic::nvvm_tcgen05_mma_shared_f16_scale_d_disable_output_lane_cg1:
+  case Intrinsic::nvvm_tcgen05_mma_shared_f16_scale_d_disable_output_lane_cg2:
+  case Intrinsic::nvvm_tcgen05_mma_shared_tf32_scale_d_disable_output_lane_cg1:
+  case Intrinsic::nvvm_tcgen05_mma_shared_tf32_scale_d_disable_output_lane_cg2:
+  case Intrinsic::nvvm_tcgen05_mma_sp_shared_disable_output_lane_cg1:
+  case Intrinsic::nvvm_tcgen05_mma_sp_shared_disable_output_lane_cg2:
+  case Intrinsic::
+      nvvm_tcgen05_mma_sp_shared_f16_scale_d_disable_output_lane_cg1:
+  case Intrinsic::
+      nvvm_tcgen05_mma_sp_shared_f16_scale_d_disable_output_lane_cg2:
+  case Intrinsic::
+      nvvm_tcgen05_mma_sp_shared_tf32_scale_d_disable_output_lane_cg1:
+  case Intrinsic::
+      nvvm_tcgen05_mma_sp_shared_tf32_scale_d_disable_output_lane_cg2:
+  case Intrinsic::nvvm_tcgen05_mma_tensor_disable_output_lane_cg1:
+  case Intrinsic::nvvm_tcgen05_mma_tensor_disable_output_lane_cg2:
+  case Intrinsic::nvvm_tcgen05_mma_tensor_f16_scale_d_disable_output_lane_cg1:
+  case Intrinsic::nvvm_tcgen05_mma_tensor_f16_scale_d_disable_output_lane_cg2:
+  case Intrinsic::nvvm_tcgen05_mma_tensor_tf32_scale_d_disable_output_lane_cg1:
+  case Intrinsic::nvvm_tcgen05_mma_tensor_tf32_scale_d_disable_output_lane_cg2:
+  case Intrinsic::nvvm_tcgen05_mma_sp_tensor_disable_output_lane_cg1:
+  case Intrinsic::nvvm_tcgen05_mma_sp_tensor_disable_output_lane_cg2:
+  case Intrinsic::
+      nvvm_tcgen05_mma_sp_tensor_f16_scale_d_disable_output_lane_cg1:
+  case Intrinsic::
+      nvvm_tcgen05_mma_sp_tensor_f16_scale_d_disable_output_lane_cg2:
+  case Intrinsic::
+      nvvm_tcgen05_mma_sp_tensor_tf32_scale_d_disable_output_lane_cg1:
+  case Intrinsic::
+      nvvm_tcgen05_mma_sp_tensor_tf32_scale_d_disable_output_lane_cg2:
+  case Intrinsic::nvvm_tcgen05_mma_tensor_disable_output_lane_cg1_ashift:
+  case Intrinsic::nvvm_tcgen05_mma_tensor_disable_output_lane_cg2_ashift:
+  case Intrinsic::
+      nvvm_tcgen05_mma_tensor_f16_scale_d_disable_output_lane_cg1_ashift:
+  case Intrinsic::
+      nvvm_tcgen05_mma_tensor_f16_scale_d_disable_output_lane_cg2_ashift:
+  case Intrinsic::
+      nvvm_tcgen05_mma_tensor_tf32_scale_d_disable_output_lane_cg1_ashift:
+  case Intrinsic::
+      nvvm_tcgen05_mma_tensor_tf32_scale_d_disable_output_lane_cg2_ashift:
+  case Intrinsic::nvvm_tcgen05_mma_sp_tensor_disable_output_lane_cg1_ashift:
+  case Intrinsic::nvvm_tcgen05_mma_sp_tensor_disable_output_lane_cg2_ashift:
+  case
Intrinsic:: + nvvm_tcgen05_mma_sp_tensor_f16_scale_d_disable_output_lane_cg1_ashift: + case Intrinsic:: + nvvm_tcgen05_mma_sp_tensor_f16_scale_d_disable_output_lane_cg2_ashift: + case Intrinsic:: + nvvm_tcgen05_mma_sp_tensor_tf32_scale_d_disable_output_lane_cg1_ashift: + case Intrinsic:: + nvvm_tcgen05_mma_sp_tensor_tf32_scale_d_disable_output_lane_cg2_ashift: + return LowerTcgen05MMADisableOutputLane(Op, DAG); } return Op; } @@ -4530,6 +4772,77 @@ bool NVPTXTargetLowering::getTgtMemIntrinsic( Info.align.reset(); return true; } + case Intrinsic::nvvm_tcgen05_mma_shared_disable_output_lane_cg1: + case Intrinsic::nvvm_tcgen05_mma_shared_f16_scale_d_disable_output_lane_cg1: + case Intrinsic::nvvm_tcgen05_mma_shared_tf32_scale_d_disable_output_lane_cg1: + case Intrinsic::nvvm_tcgen05_mma_sp_shared_disable_output_lane_cg1: + case Intrinsic:: + nvvm_tcgen05_mma_sp_shared_f16_scale_d_disable_output_lane_cg1: + case Intrinsic:: + nvvm_tcgen05_mma_sp_shared_tf32_scale_d_disable_output_lane_cg1: + case Intrinsic::nvvm_tcgen05_mma_tensor_disable_output_lane_cg1: + case Intrinsic::nvvm_tcgen05_mma_tensor_f16_scale_d_disable_output_lane_cg1: + case Intrinsic::nvvm_tcgen05_mma_tensor_tf32_scale_d_disable_output_lane_cg1: + case Intrinsic::nvvm_tcgen05_mma_tensor_disable_output_lane_cg1_ashift: + case Intrinsic:: + nvvm_tcgen05_mma_tensor_f16_scale_d_disable_output_lane_cg1_ashift: + case Intrinsic:: + nvvm_tcgen05_mma_tensor_tf32_scale_d_disable_output_lane_cg1_ashift: + case Intrinsic::nvvm_tcgen05_mma_sp_tensor_disable_output_lane_cg1: + case Intrinsic:: + nvvm_tcgen05_mma_sp_tensor_f16_scale_d_disable_output_lane_cg1: + case Intrinsic:: + nvvm_tcgen05_mma_sp_tensor_tf32_scale_d_disable_output_lane_cg1: + case Intrinsic::nvvm_tcgen05_mma_sp_tensor_disable_output_lane_cg1_ashift: + case Intrinsic:: + nvvm_tcgen05_mma_sp_tensor_f16_scale_d_disable_output_lane_cg1_ashift: + case Intrinsic:: + nvvm_tcgen05_mma_sp_tensor_tf32_scale_d_disable_output_lane_cg1_ashift: { + // We are reading and writing back to TMem + Info.opc = ISD::INTRINSIC_VOID; + Info.memVT = MVT::v4i32; + Info.ptrVal = I.getArgOperand(0); + Info.offset = 0; + Info.flags = MachineMemOperand::MOLoad | MachineMemOperand::MOStore; + Info.align = Align(16); + return true; + } + + case Intrinsic::nvvm_tcgen05_mma_shared_disable_output_lane_cg2: + case Intrinsic::nvvm_tcgen05_mma_shared_f16_scale_d_disable_output_lane_cg2: + case Intrinsic::nvvm_tcgen05_mma_shared_tf32_scale_d_disable_output_lane_cg2: + case Intrinsic::nvvm_tcgen05_mma_sp_shared_disable_output_lane_cg2: + case Intrinsic:: + nvvm_tcgen05_mma_sp_shared_f16_scale_d_disable_output_lane_cg2: + case Intrinsic:: + nvvm_tcgen05_mma_sp_shared_tf32_scale_d_disable_output_lane_cg2: + case Intrinsic::nvvm_tcgen05_mma_tensor_disable_output_lane_cg2: + case Intrinsic::nvvm_tcgen05_mma_tensor_f16_scale_d_disable_output_lane_cg2: + case Intrinsic::nvvm_tcgen05_mma_tensor_tf32_scale_d_disable_output_lane_cg2: + case Intrinsic::nvvm_tcgen05_mma_sp_tensor_disable_output_lane_cg2: + case Intrinsic:: + nvvm_tcgen05_mma_sp_tensor_f16_scale_d_disable_output_lane_cg2: + case Intrinsic:: + nvvm_tcgen05_mma_sp_tensor_tf32_scale_d_disable_output_lane_cg2: + case Intrinsic::nvvm_tcgen05_mma_tensor_disable_output_lane_cg2_ashift: + case Intrinsic:: + nvvm_tcgen05_mma_tensor_f16_scale_d_disable_output_lane_cg2_ashift: + case Intrinsic:: + nvvm_tcgen05_mma_tensor_tf32_scale_d_disable_output_lane_cg2_ashift: + case Intrinsic::nvvm_tcgen05_mma_sp_tensor_disable_output_lane_cg2_ashift: + case Intrinsic:: + 
nvvm_tcgen05_mma_sp_tensor_f16_scale_d_disable_output_lane_cg2_ashift: + case Intrinsic:: + nvvm_tcgen05_mma_sp_tensor_tf32_scale_d_disable_output_lane_cg2_ashift: { + // We are reading and writing back to TMem + Info.opc = ISD::INTRINSIC_VOID; + Info.memVT = MVT::v8i32; + Info.ptrVal = I.getArgOperand(0); + Info.offset = 0; + Info.flags = MachineMemOperand::MOLoad | MachineMemOperand::MOStore; + Info.align = Align(16); + return true; + } } return false; } diff --git a/llvm/lib/Target/NVPTX/NVPTXISelLowering.h b/llvm/lib/Target/NVPTX/NVPTXISelLowering.h index cf72a1e6db89c..d3d33411f7b6d 100644 --- a/llvm/lib/Target/NVPTX/NVPTXISelLowering.h +++ b/llvm/lib/Target/NVPTX/NVPTXISelLowering.h @@ -84,7 +84,44 @@ enum NodeType : unsigned { StoreV2, StoreV4, StoreV8, - LAST_MEMORY_OPCODE = StoreV8, + TCGEN05_MMA_SHARED_DISABLE_OUTPUT_LANE_CG1, + TCGEN05_MMA_SHARED_DISABLE_OUTPUT_LANE_CG2, + TCGEN05_MMA_SHARED_F16_SCALE_D_DISABLE_OUTPUT_LANE_CG1, + TCGEN05_MMA_SHARED_F16_SCALE_D_DISABLE_OUTPUT_LANE_CG2, + TCGEN05_MMA_SHARED_TF32_SCALE_D_DISABLE_OUTPUT_LANE_CG1, + TCGEN05_MMA_SHARED_TF32_SCALE_D_DISABLE_OUTPUT_LANE_CG2, + TCGEN05_MMA_TENSOR_DISABLE_OUTPUT_LANE_CG1, + TCGEN05_MMA_TENSOR_DISABLE_OUTPUT_LANE_CG2, + TCGEN05_MMA_TENSOR_F16_SCALE_D_DISABLE_OUTPUT_LANE_CG1, + TCGEN05_MMA_TENSOR_F16_SCALE_D_DISABLE_OUTPUT_LANE_CG2, + TCGEN05_MMA_TENSOR_TF32_SCALE_D_DISABLE_OUTPUT_LANE_CG1, + TCGEN05_MMA_TENSOR_TF32_SCALE_D_DISABLE_OUTPUT_LANE_CG2, + TCGEN05_MMA_TENSOR_DISABLE_OUTPUT_LANE_CG1_ASHIFT, + TCGEN05_MMA_TENSOR_DISABLE_OUTPUT_LANE_CG2_ASHIFT, + TCGEN05_MMA_TENSOR_F16_SCALE_D_DISABLE_OUTPUT_LANE_CG1_ASHIFT, + TCGEN05_MMA_TENSOR_F16_SCALE_D_DISABLE_OUTPUT_LANE_CG2_ASHIFT, + TCGEN05_MMA_TENSOR_TF32_SCALE_D_DISABLE_OUTPUT_LANE_CG1_ASHIFT, + TCGEN05_MMA_TENSOR_TF32_SCALE_D_DISABLE_OUTPUT_LANE_CG2_ASHIFT, + TCGEN05_MMA_SP_SHARED_DISABLE_OUTPUT_LANE_CG1, + TCGEN05_MMA_SP_SHARED_DISABLE_OUTPUT_LANE_CG2, + TCGEN05_MMA_SP_SHARED_F16_SCALE_D_DISABLE_OUTPUT_LANE_CG1, + TCGEN05_MMA_SP_SHARED_F16_SCALE_D_DISABLE_OUTPUT_LANE_CG2, + TCGEN05_MMA_SP_SHARED_TF32_SCALE_D_DISABLE_OUTPUT_LANE_CG1, + TCGEN05_MMA_SP_SHARED_TF32_SCALE_D_DISABLE_OUTPUT_LANE_CG2, + TCGEN05_MMA_SP_TENSOR_DISABLE_OUTPUT_LANE_CG1, + TCGEN05_MMA_SP_TENSOR_DISABLE_OUTPUT_LANE_CG2, + TCGEN05_MMA_SP_TENSOR_DISABLE_OUTPUT_LANE_CG1_ASHIFT, + TCGEN05_MMA_SP_TENSOR_DISABLE_OUTPUT_LANE_CG2_ASHIFT, + TCGEN05_MMA_SP_TENSOR_F16_SCALE_D_DISABLE_OUTPUT_LANE_CG1, + TCGEN05_MMA_SP_TENSOR_F16_SCALE_D_DISABLE_OUTPUT_LANE_CG2, + TCGEN05_MMA_SP_TENSOR_TF32_SCALE_D_DISABLE_OUTPUT_LANE_CG1, + TCGEN05_MMA_SP_TENSOR_TF32_SCALE_D_DISABLE_OUTPUT_LANE_CG2, + TCGEN05_MMA_SP_TENSOR_F16_SCALE_D_DISABLE_OUTPUT_LANE_CG1_ASHIFT, + TCGEN05_MMA_SP_TENSOR_F16_SCALE_D_DISABLE_OUTPUT_LANE_CG2_ASHIFT, + TCGEN05_MMA_SP_TENSOR_TF32_SCALE_D_DISABLE_OUTPUT_LANE_CG1_ASHIFT, + TCGEN05_MMA_SP_TENSOR_TF32_SCALE_D_DISABLE_OUTPUT_LANE_CG2_ASHIFT, + LAST_MEMORY_OPCODE = + TCGEN05_MMA_SP_TENSOR_TF32_SCALE_D_DISABLE_OUTPUT_LANE_CG2_ASHIFT, }; } diff --git a/llvm/lib/Target/NVPTX/NVPTXIntrinsics.td b/llvm/lib/Target/NVPTX/NVPTXIntrinsics.td index d33719236b172..526c113374c72 100644 --- a/llvm/lib/Target/NVPTX/NVPTXIntrinsics.td +++ b/llvm/lib/Target/NVPTX/NVPTXIntrinsics.td @@ -5125,7 +5125,7 @@ let Predicates = [hasSM<90>, hasPTX<78>] in { def INT_EXIT : BasicNVPTXInst<(outs), (ins), "exit", [(int_nvvm_exit)]>; // Tcgen05 intrinsics -let isConvergent = true, Predicates = [hasTcgen05Instructions] in { +let isConvergent = true, Predicates = [hasTcgen05Instructions, hasPTX<86>] in { multiclass 
TCGEN05_ALLOC_INTR { def "" : BasicNVPTXInst<(outs), @@ -5221,7 +5221,7 @@ foreach src_fmt = ["", "b6x16_p32", "b4x16_p64"] in { } } // isConvergent -let hasSideEffects = 1, Predicates = [hasTcgen05Instructions] in { +let hasSideEffects = 1, Predicates = [hasTcgen05Instructions, hasPTX<86>] in { def tcgen05_fence_before_thread_sync: NullaryInst< "tcgen05.fence::before_thread_sync", int_nvvm_tcgen05_fence_before_thread_sync>; @@ -5256,7 +5256,7 @@ class TCGEN05_LDST_REGINFO { class TCGEN05_LD_INST : NVPTXInst<(outs), (ins), "?", []>, - Requires<[hasTcgen05Instructions]> { + Requires<[hasTcgen05Instructions, hasPTX<86>]> { TCGEN05_LDST_REGINFO Info = TCGEN05_LDST_REGINFO< NVVM_TCGEN05_LDST_ACCESS_SIZE.veclen>; @@ -5281,7 +5281,7 @@ class TCGEN05_LD_INST : class TCGEN05_ST_INST : NVPTXInst<(outs), (ins), "?", []>, - Requires<[hasTcgen05Instructions]> { + Requires<[hasTcgen05Instructions, hasPTX<86>]> { TCGEN05_LDST_REGINFO Info = TCGEN05_LDST_REGINFO< NVVM_TCGEN05_LDST_ACCESS_SIZE.veclen>; @@ -5385,3 +5385,477 @@ foreach dim = ["x", "y", "z"] in { def CLUSTERLAUNCHCONTROL_QUERY_CANCEL_GET_FIRST_CTAID_ # dim: CLUSTERLAUNCHCONTROL_QUERY_CANCEL_GET_FIRST_CTAID; } + +// +// tcgen05.mma Instructions +// + +class Tcgen05MMAInst : + NVPTXInst<(outs), (ins), "?", []>, + Requires<[hasTcgen05Instructions, hasPTX<86>]> { + + string ScaleInpIntrinStr = !if(!eq(HasScaleInput, 1), "_" # KindStr # "_scale_d", ""); + string AShiftIntrinStr = !if(!eq(HasAShift, 1), "_ashift", ""); + + Intrinsic Intrin = !cast( + NVVM_TCGEN05_MMA_NAME.record + # ScaleInpIntrinStr # AShiftIntrinStr); + + dag ScaleInpIns = !if(!eq(HasScaleInput, 1), (ins i64imm:$scale_input_d), (ins)); + string ScaleInpStr = !if(!eq(HasScaleInput, 1), ", $scale_input_d", ""); + dag ScaleInpInput = !if(!eq(HasScaleInput, 1), (Intrin i64:$scale_input_d), (Intrin)); + + dag SparseMetadataIns = !if(!eq(Sp, ".sp"), (ins B32:$spmetadata), (ins)); + string SparseMetadataStr = !if(!eq(Sp, ".sp"), ", [$spmetadata]", ""); + dag SparseMetadataInput = !if(!eq(Sp, ".sp"), (Intrin B32:$spmetadata), (Intrin)); + + string CtaGroupStr = !cond( + !eq(CtaGroup, 1) : "cta_group::1", + !eq(CtaGroup, 2) : "cta_group::2" + ); + + int KindVal = !cond( + !eq(KindStr, "f16"): 0, + !eq(KindStr, "tf32"): 1, + !eq(KindStr, "f8f6f4"): 2, + !eq(KindStr, "i8"): 3, + ); + + int CollectorUsageVal = !cond( + !eq(CollectorUsage, "discard"): 0, + !eq(CollectorUsage, "lastuse"): 1, + !eq(CollectorUsage, "fill"): 2, + !eq(CollectorUsage, "use"): 3 + ); + + string AShiftStr = !if(!eq(HasAShift, 1), ".ashift", ""); + + string AOperandStr = !cond( + !eq(ASpace, "tensor"): "[$a]", + !eq(ASpace, "shared"): "$a" + ); + + string CollectorUsageStr = !if(!ne(CollectorUsage, ""), + ".collector::a::" # CollectorUsage, ""); + + NVPTXRegClass ARegClass = !if(!eq(ASpace, "tensor"), B32, B64); + + dag input = !con((ins B32:$dtmem, + ARegClass:$a, ADDR:$b, + B32:$idesc, + B1:$enable_inp_d), + SparseMetadataIns, + ScaleInpIns); + + let InOperandList = input; + let OutOperandList = (outs); + let AsmString = "tcgen05.mma" # Sp + # "." 
# CtaGroupStr + # ".kind::" # KindStr + # CollectorUsageStr + # AShiftStr + # " [$dtmem], " # AOperandStr # ", $b" + # SparseMetadataStr + # ", $idesc, $enable_inp_d" + # ScaleInpStr + # ";"; + + dag IntrinsicPattern = !con((Intrin i32:$dtmem, + ARegClass:$a, addr:$b, + i32:$idesc, + i1:$enable_inp_d), + SparseMetadataInput, + ScaleInpInput); + + dag FlagOperands = !con(!if(!ne(HasScaleInput, 1), + (Intrin (i32 KindVal)), (Intrin)), + (Intrin (i32 CtaGroup), (i32 CollectorUsageVal))); + + let Pattern = [!con(IntrinsicPattern, FlagOperands)]; +} + +// tcgen05.mma +foreach sp = ["", ".sp"] in { + foreach kind = ["f16", "tf32", "f8f6f4", "i8"] in { + foreach cta_group = [1, 2] in { + foreach collector_usage = ["discard", "lastuse", "fill", "use", ] in { + foreach scale_input_d = [0, 1] in { + defvar HasScaleInput = !and(!eq(scale_input_d, 1), + !or(!eq(kind, "f16"), + !eq(kind, "tf32"))); + + def : Tcgen05MMAInst; + + def : Tcgen05MMAInst; + + def : Tcgen05MMAInst; + } + } + } + } +} + +// +// tcgen05.mma.ws Instructions +// + +class Tcgen05MMAWSInst : + NVPTXInst<(outs), (ins), "?", []>, + Requires<[hasTcgen05Instructions, hasPTX<86>]> { + + Intrinsic Intrin = !cast( + NVVM_TCGEN05_MMA_WS_NAME.record); + + dag ZeroColMaskIns = !if(!eq(HasZeroColMask, 1), + (ins B64:$zero_col_mask), (ins)); + string ZeroColMaskStr = !if(!eq(HasZeroColMask, 1), ", $zero_col_mask", ""); + dag ZeroColMaskInput = !if(!eq(HasZeroColMask, 1), + (Intrin i64:$zero_col_mask), (Intrin)); + + dag SparseMetadataIns = !if(!eq(Sp, ".sp"), (ins B32:$spmetadata), (ins)); + string SparseMetadataStr = !if(!eq(Sp, ".sp"), ", [$spmetadata]", ""); + dag SparseMetadataInput = !if(!eq(Sp, ".sp"), (Intrin B32:$spmetadata), (Intrin)); + + int KindVal = !cond( + !eq(KindStr, "f16") : 0, + !eq(KindStr, "tf32") : 1, + !eq(KindStr, "f8f6f4"): 2, + !eq(KindStr, "i8") : 3, + ); + + int CollectorUsageOp = !cond( + !eq(CollectorUsageOpStr, "discard"): 0, + !eq(CollectorUsageOpStr, "lastuse"): 1, + !eq(CollectorUsageOpStr, "fill") : 2, + !eq(CollectorUsageOpStr, "use") : 3, + ); + + int CollectorBufferB = !cond( + !eq(CollectorBufferBStr, "b0"): 0, + !eq(CollectorBufferBStr, "b1"): 1, + !eq(CollectorBufferBStr, "b2"): 2, + !eq(CollectorBufferBStr, "b3"): 3 + ); + + string AOperandStr = !cond( + !eq(ASpace, "tensor"): "[$a]", + !eq(ASpace, "shared"): "$a" + ); + + NVPTXRegClass ARegClass = !if(!eq(ASpace, "tensor"), B32, B64); + + dag input = !con((ins B32:$dtmem, + ARegClass:$a, B64:$b, + B32:$idesc, + B1:$enable_inp_d), + SparseMetadataIns, + ZeroColMaskIns); + + let InOperandList = input; + let OutOperandList = (outs); + let AsmString = "tcgen05.mma.ws" # Sp + # ".cta_group::1" + # ".kind::" # KindStr + # ".collector::" # CollectorBufferBStr # "::" # CollectorUsageOpStr + # " [$dtmem], " # AOperandStr # ", $b" + # SparseMetadataStr + # ", $idesc, $enable_inp_d" + # ZeroColMaskStr + # ";"; + + dag IntrinsicPattern = !con((Intrin i32:$dtmem, + ARegClass:$a, i64:$b, + i32:$idesc, + i1:$enable_inp_d), + SparseMetadataInput, + ZeroColMaskInput); + + dag FlagOperands = (Intrin (i32 KindVal), (i32 CollectorBufferB), + (i32 CollectorUsageOp)); + + let Pattern = [!con(IntrinsicPattern, FlagOperands)]; +} + +// tcgen05.mma.ws +foreach space = ["shared", "tensor"] in { + foreach sp = ["", ".sp"] in { + foreach kind = ["f16", "tf32", "f8f6f4", "i8"] in { + foreach collector_buffer_b = ["b0", "b1", "b2", "b3"] in { + foreach collector_usage_op = ["discard", "fill", "use", "lastuse"] in { + foreach zero_col_mask = [0, 1] in { + def : Tcgen05MMAWSInst; + 
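+            // As a concrete reading of the AsmString concatenation above
+            // (an illustrative expansion worked out by hand): sp = "",
+            // space = "shared", kind = "tf32", collector_buffer_b = "b1",
+            // collector_usage_op = "fill", zero_col_mask = 0 prints as
+            //
+            //   tcgen05.mma.ws.cta_group::1.kind::tf32.collector::b1::fill
+            //       [$dtmem], $a, $b, $idesc, $enable_inp_d;
+            //
+            // tcgen05.mma.ws only supports cta_group::1, so no cta_group
+            // flag operand is modeled on the ws intrinsics.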
} + } + } + } + } +} + +class Tcgen05MMADisableOutputLaneTypeProfile: + SDTypeProfile<0, 0, []> { + int DisableOutputLaneVecSize = !cond( + !eq(CtaGroup, 1) : 4, + !eq(CtaGroup, 2) : 8 + ); + + list VTs = !listconcat( + [i32], // d + !if(!eq(ASpace, "tensor"), [i32], [i64]), // a + [i64, i32, i1], // b, idesc, enable_inp_d + !if(!eq(Sp, ".sp"), [i32], []), // spmetadata + !if(!eq(ScaleInputD, 1), [i64], []), // scale_input_d + !listsplat(i32, DisableOutputLaneVecSize), // disable_output_lane + !if(!ne(ScaleInputD, 1), [i32], []), // kind + [i32] // collector_usage + ); + let Constraints = !foreach(x, !range(!size(VTs)), SDTCisVT); + let NumOperands = !size(Constraints); +} + +class Tcgen05MMADisableOutputLaneSDNode: + SDNode<"NVPTXISD::TCGEN05_MMA" + # !if(!eq(Sp, ".sp"), "_SP", "") + # "_" # !toupper(ASpace) + # !if(!eq(ScaleInput, 1), + "_" # !toupper(Kind) # "_SCALE_D", "") + # "_DISABLE_OUTPUT_LANE_CG" # CtaGroup + # !if(!eq(HasAShift, 1), "_ASHIFT", ""), + Tcgen05MMADisableOutputLaneTypeProfile, + [SDNPHasChain, SDNPSideEffect]>; + +class Tcgen05MMADisableOutputLaneInst : + NVPTXInst<(outs), (ins), "?", []>, + Requires<[hasTcgen05Instructions, hasPTX<86>]> { + + SDNode Opcode = Tcgen05MMADisableOutputLaneSDNode; + + + dag ScaleInpIns = !if(!eq(HasScaleInput, 1), (ins i64imm:$scale_input_d), (ins)); + string ScaleInpStr = !if(!eq(HasScaleInput, 1), ", $scale_input_d", ""); + dag ScaleInpInput = !if(!eq(HasScaleInput, 1), (Opcode i64:$scale_input_d), (Opcode)); + + // disable output lane + int DisableOutputLaneVecSize = !cond( + !eq(CtaGroup, 1) : 4, + !eq(CtaGroup, 2) : 8 + ); + + dag DisableOutputLaneIns = !dag(ins, + !listsplat(B32, DisableOutputLaneVecSize), + !foreach(x, + !range(DisableOutputLaneVecSize), + "disable_output_lane" # x)); + + dag DisableOutputLaneInput = !dag(Opcode, + !listsplat(i32, DisableOutputLaneVecSize), + !foreach(x, + !range(DisableOutputLaneVecSize), + "disable_output_lane" # x)); + + string DisableOutputLaneStr = "{{" # + !interleave( + !foreach(x, + !range(DisableOutputLaneVecSize), + "$disable_output_lane" # x), + ", ") + # "}}"; + + dag SparseMetadataIns = !if(!eq(Sp, ".sp"), (ins B32:$spmetadata), (ins)); + string SparseMetadataStr = !if(!eq(Sp, ".sp"), ", [$spmetadata]", ""); + dag SparseMetadataInput = !if(!eq(Sp, ".sp"), (Opcode i32:$spmetadata), (Opcode)); + + string CtaGroupStr = !cond( + !eq(CtaGroup, 1) : "cta_group::1", + !eq(CtaGroup, 2) : "cta_group::2" + ); + + int KindVal = !cond( + !eq(Kind, "f16"): 0, + !eq(Kind, "tf32"): 1, + !eq(Kind, "f8f6f4"): 2, + !eq(Kind, "i8"): 3, + ); + + int CollectorUsage = !cond( + !eq(CollectorUsageStr, "discard"): 0, + !eq(CollectorUsageStr, "lastuse"): 1, + !eq(CollectorUsageStr, "fill"): 2, + !eq(CollectorUsageStr, "use"): 3, + ); + + string AOperandStr = !cond( + !eq(ASpace, "tensor"): "[$a]", + !eq(ASpace, "shared"): "$a" + ); + + NVPTXRegClass ARegClass = !if(!eq(ASpace, "tensor"), B32, B64); + + string AShiftStr = !if(!eq(HasAShift, 1), ".ashift", ""); + + dag InOperandList = !con((ins B32:$dtmem, + ARegClass:$a, B64:$b, + B32:$idesc, + B1:$enable_inp_d), + SparseMetadataIns, + ScaleInpIns, + DisableOutputLaneIns); + + let OutOperandList = (outs); + let AsmString = "tcgen05.mma" # Sp + # "." 
# CtaGroupStr + # ".kind::" # Kind + # AShiftStr + # ".collector::a::" # CollectorUsageStr + # " " # "[$dtmem], " # AOperandStr # ", $b" + # SparseMetadataStr + # ", " # "$idesc" + # ", " # DisableOutputLaneStr + # ", $enable_inp_d" + # ScaleInpStr + # ";"; + + dag IntrinsicPattern = !con((Opcode i32:$dtmem, + ARegClass:$a, i64:$b, + i32:$idesc, + i1:$enable_inp_d), + SparseMetadataInput, + ScaleInpInput, + DisableOutputLaneInput); + + dag FlagOperands = !con(!if(!ne(HasScaleInput, 1), + (Opcode (i32 KindVal)), (Opcode)), + (Opcode (i32 CollectorUsage))); + + let Pattern = [!con(IntrinsicPattern, FlagOperands)]; +} + +// tcgen05.mma.disable_output_lane +foreach sp = ["", ".sp"] in { + foreach kind = ["f16", "tf32", "f8f6f4", "i8"] in { + foreach cta_group = [1, 2] in { + foreach collector_usage = ["fill", "use", "lastuse", "discard"] in { + foreach scale_input_d = [0, 1] in { + defvar HasScaleInput = !and(!eq(scale_input_d, 1), + !or(!eq(kind, "f16"), + !eq(kind, "tf32"))); + def : Tcgen05MMADisableOutputLaneInst; + + def : Tcgen05MMADisableOutputLaneInst; + + def : Tcgen05MMADisableOutputLaneInst; + } + } + } + } +} + +class Tcgen05MMABlockScaleInst: + NVPTXInst<(outs), (ins), "?", []>, + Requires<[hasTcgen05Instructions, PTXPredicate]> { + + Intrinsic Intrin = !cast( + NVVM_TCGEN05_MMA_BLOCKSCALE_NAME.record); + + dag SparseMetadataIns = !if(!eq(Sp, ".sp"), (ins B32:$spmetadata), (ins)); + string SparseMetadataStr = !if(!eq(Sp, ".sp"), ", [$spmetadata]", ""); + dag SparseMetadataInput = !if(!eq(Sp, ".sp"), (Intrin i32:$spmetadata), (Intrin)); + + string CtaGroupStr = !cond( + !eq(CtaGroup, 1) : "cta_group::1", + !eq(CtaGroup, 2) : "cta_group::2" + ); + int KindVal = !cond( + !eq(KindStr, "mxf8f6f4") : 0, + !eq(KindStr, "mxf4") : 1, + !eq(KindStr, "mxf4nvf4") : 2, + ); + + string ScaleVecSizeStr = !cond( + !eq(ScaleVecSize, "") :"", + !eq(ScaleVecSize, ".block16") :".block16", + !eq(ScaleVecSize, ".block32") :".block32", + ); + + int CollectorUsage = !cond( + !eq(CollectorUsageStr, "discard") : 0, + !eq(CollectorUsageStr, "lastuse") : 1, + !eq(CollectorUsageStr, "fill") : 2, + !eq(CollectorUsageStr, "use") : 3, + ); + + string AOperandStr = !cond( + !eq(ASpace, "tensor"): "[$a]", + !eq(ASpace, "shared"): "$a" + ); + + NVPTXRegClass ARegClass = !if(!eq(ASpace, "tensor"), B32, B64); + + dag input = !con((ins B32:$dtmem, ARegClass:$a, B64:$b, + B32:$idesc, B1:$enable_inp_d), + SparseMetadataIns, + (ins B32:$scale_a, + B32:$scale_b)); + + let InOperandList = input; + let OutOperandList = (outs); + let AsmString = "tcgen05.mma" # Sp + # "." 
# CtaGroupStr + # ".kind::" # KindStr + # ".block_scale" # ScaleVecSizeStr + # ".collector::a::" # CollectorUsageStr + # " [$dtmem], " # AOperandStr # ", $b" + # SparseMetadataStr + # ", $idesc, [$scale_a], [$scale_b], $enable_inp_d" + # ";"; + + dag IntrinsicPattern = !con((Intrin i32:$dtmem, + ARegClass:$a, i64:$b, + i32:$idesc, + i1:$enable_inp_d), + SparseMetadataInput, + (Intrin i32:$scale_a, + i32:$scale_b)); + + dag FlagOperands = (Intrin (i32 CtaGroup), (i32 CollectorUsage)); + + let Pattern = [!con(IntrinsicPattern, FlagOperands)]; +} + +// tcgen05.mma.block_scale +foreach sp = ["", ".sp"] in { + foreach space = ["tensor", "shared"] in { + foreach kind = ["mxf8f6f4", "mxf4", "mxf4nvf4"] in { + foreach scale_vec_size = ["", ".block16", ".block32"] in { + foreach cta_group = [1, 2] in { + foreach collector_usage = ["fill", "use", "lastuse", "discard"] in { + if NVVM_TCGEN05_MMA_BLOCKSCALE_SUPPORTED.ret then { + def : Tcgen05MMABlockScaleInst, hasPTX<86>)>; + } + } + } + } + } + } +} diff --git a/llvm/lib/Target/NVPTX/NVPTXSubtarget.h b/llvm/lib/Target/NVPTX/NVPTXSubtarget.h index 81af55edccadb..889fa2da81304 100644 --- a/llvm/lib/Target/NVPTX/NVPTXSubtarget.h +++ b/llvm/lib/Target/NVPTX/NVPTXSubtarget.h @@ -114,7 +114,7 @@ class NVPTXSubtarget : public NVPTXGenSubtargetInfo { break; } - return HasTcgen05 && PTXVersion >= 86; + return HasTcgen05; } // f32x2 instructions in Blackwell family bool hasF32x2Instructions() const { diff --git a/llvm/test/CodeGen/NVPTX/tcgen05-mma-block-scale-ptx88.ll b/llvm/test/CodeGen/NVPTX/tcgen05-mma-block-scale-ptx88.ll new file mode 100644 index 0000000000000..cfaacc5822e14 --- /dev/null +++ b/llvm/test/CodeGen/NVPTX/tcgen05-mma-block-scale-ptx88.ll @@ -0,0 +1,526 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 +; RUN: llc < %s -o - -mcpu=sm_100a -march=nvptx64 -mattr=+ptx88 | FileCheck %s +; RUN: llc < %s -o - -mcpu=sm_101a -march=nvptx64 -mattr=+ptx88 | FileCheck %s +; RUN: %if ptxas-12.9 %{ llc < %s -march=nvptx64 -mattr=+ptx88 -mcpu=sm_100a | %ptxas-verify -arch=sm_100a %} +; RUN: %if ptxas-12.9 %{ llc < %s -march=nvptx64 -mattr=+ptx88 -mcpu=sm_101a | %ptxas-verify -arch=sm_101a %} + +define void @tcgen05_mma_mxf8f6f4(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b) { +; CHECK-LABEL: tcgen05_mma_mxf8f6f4( +; CHECK: { +; CHECK-NEXT: .reg .pred %p<2>; +; CHECK-NEXT: .reg .b16 %rs<3>; +; CHECK-NEXT: .reg .b32 %r<6>; +; CHECK-NEXT: .reg .b64 %rd<3>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.b8 %rs1, [tcgen05_mma_mxf8f6f4_param_5]; +; CHECK-NEXT: and.b16 %rs2, %rs1, 1; +; CHECK-NEXT: setp.ne.b16 %p1, %rs2, 0; +; CHECK-NEXT: ld.param.b32 %r1, [tcgen05_mma_mxf8f6f4_param_0]; +; CHECK-NEXT: ld.param.b64 %rd1, [tcgen05_mma_mxf8f6f4_param_2]; +; CHECK-NEXT: ld.param.b64 %rd2, [tcgen05_mma_mxf8f6f4_param_3]; +; CHECK-NEXT: ld.param.b32 %r2, [tcgen05_mma_mxf8f6f4_param_4]; +; CHECK-NEXT: ld.param.b32 %r3, [tcgen05_mma_mxf8f6f4_param_6]; +; CHECK-NEXT: ld.param.b32 %r4, [tcgen05_mma_mxf8f6f4_param_7]; +; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::mxf8f6f4.block_scale.block32.collector::a::discard [%r1], %rd1, %rd2, %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: ld.param.b32 %r5, [tcgen05_mma_mxf8f6f4_param_1]; +; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::mxf8f6f4.block_scale.block32.collector::a::discard [%r1], %rd1, %rd2, %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: 
tcgen05.mma.cta_group::1.kind::mxf8f6f4.block_scale.block32.collector::a::discard [%r1], [%r5], %rd2, %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::mxf8f6f4.block_scale.block32.collector::a::discard [%r1], [%r5], %rd2, %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::mxf8f6f4.block_scale.block32.collector::a::lastuse [%r1], %rd1, %rd2, %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::mxf8f6f4.block_scale.block32.collector::a::lastuse [%r1], %rd1, %rd2, %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::mxf8f6f4.block_scale.block32.collector::a::lastuse [%r1], [%r5], %rd2, %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::mxf8f6f4.block_scale.block32.collector::a::lastuse [%r1], [%r5], %rd2, %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::mxf8f6f4.block_scale.block32.collector::a::fill [%r1], %rd1, %rd2, %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::mxf8f6f4.block_scale.block32.collector::a::fill [%r1], %rd1, %rd2, %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::mxf8f6f4.block_scale.block32.collector::a::fill [%r1], [%r5], %rd2, %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::mxf8f6f4.block_scale.block32.collector::a::fill [%r1], [%r5], %rd2, %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::mxf8f6f4.block_scale.block32.collector::a::use [%r1], %rd1, %rd2, %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::mxf8f6f4.block_scale.block32.collector::a::use [%r1], %rd1, %rd2, %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::mxf8f6f4.block_scale.block32.collector::a::use [%r1], [%r5], %rd2, %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::mxf8f6f4.block_scale.block32.collector::a::use [%r1], [%r5], %rd2, %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: ret; + call void @llvm.nvvm.tcgen05.mma.shared.mxf8f6f4.block32.block_scale(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 1, i32 0) + + call void @llvm.nvvm.tcgen05.mma.shared.mxf8f6f4.block32.block_scale(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 2, i32 0) + + call void @llvm.nvvm.tcgen05.mma.tensor.mxf8f6f4.block32.block_scale(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 1, i32 0) + + call void @llvm.nvvm.tcgen05.mma.tensor.mxf8f6f4.block32.block_scale(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 2, i32 0) + + call void @llvm.nvvm.tcgen05.mma.shared.mxf8f6f4.block32.block_scale(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 1, i32 1) + + call void @llvm.nvvm.tcgen05.mma.shared.mxf8f6f4.block32.block_scale(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 2, i32 1) + + call void @llvm.nvvm.tcgen05.mma.tensor.mxf8f6f4.block32.block_scale(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 1, i32 1) + + call void 
@llvm.nvvm.tcgen05.mma.tensor.mxf8f6f4.block32.block_scale(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 2, i32 1) + + call void @llvm.nvvm.tcgen05.mma.shared.mxf8f6f4.block32.block_scale(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 1, i32 2) + + call void @llvm.nvvm.tcgen05.mma.shared.mxf8f6f4.block32.block_scale(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 2, i32 2) + + call void @llvm.nvvm.tcgen05.mma.tensor.mxf8f6f4.block32.block_scale(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 1, i32 2) + + call void @llvm.nvvm.tcgen05.mma.tensor.mxf8f6f4.block32.block_scale(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 2, i32 2) + + call void @llvm.nvvm.tcgen05.mma.shared.mxf8f6f4.block32.block_scale(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 1, i32 3) + + call void @llvm.nvvm.tcgen05.mma.shared.mxf8f6f4.block32.block_scale(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 2, i32 3) + + call void @llvm.nvvm.tcgen05.mma.tensor.mxf8f6f4.block32.block_scale(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 1, i32 3) + + call void @llvm.nvvm.tcgen05.mma.tensor.mxf8f6f4.block32.block_scale(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 2, i32 3) + ret void +} + +define void @tcgen05_mma_sp_mxf8f6f4(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, ptr addrspace(6) %spmetadata) { +; CHECK-LABEL: tcgen05_mma_sp_mxf8f6f4( +; CHECK: { +; CHECK-NEXT: .reg .pred %p<2>; +; CHECK-NEXT: .reg .b16 %rs<3>; +; CHECK-NEXT: .reg .b32 %r<7>; +; CHECK-NEXT: .reg .b64 %rd<3>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.b8 %rs1, [tcgen05_mma_sp_mxf8f6f4_param_5]; +; CHECK-NEXT: and.b16 %rs2, %rs1, 1; +; CHECK-NEXT: setp.ne.b16 %p1, %rs2, 0; +; CHECK-NEXT: ld.param.b32 %r1, [tcgen05_mma_sp_mxf8f6f4_param_0]; +; CHECK-NEXT: ld.param.b64 %rd1, [tcgen05_mma_sp_mxf8f6f4_param_2]; +; CHECK-NEXT: ld.param.b64 %rd2, [tcgen05_mma_sp_mxf8f6f4_param_3]; +; CHECK-NEXT: ld.param.b32 %r2, [tcgen05_mma_sp_mxf8f6f4_param_4]; +; CHECK-NEXT: ld.param.b32 %r3, [tcgen05_mma_sp_mxf8f6f4_param_6]; +; CHECK-NEXT: ld.param.b32 %r4, [tcgen05_mma_sp_mxf8f6f4_param_7]; +; CHECK-NEXT: ld.param.b32 %r5, [tcgen05_mma_sp_mxf8f6f4_param_8]; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::1.kind::mxf8f6f4.block_scale.block32.collector::a::discard [%r1], %rd1, %rd2, [%r5], %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: ld.param.b32 %r6, [tcgen05_mma_sp_mxf8f6f4_param_1]; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::2.kind::mxf8f6f4.block_scale.block32.collector::a::discard [%r1], %rd1, %rd2, [%r5], %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: 
tcgen05.mma.sp.cta_group::1.kind::mxf8f6f4.block_scale.block32.collector::a::discard [%r1], [%r6], %rd2, [%r5], %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::2.kind::mxf8f6f4.block_scale.block32.collector::a::discard [%r1], [%r6], %rd2, [%r5], %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::1.kind::mxf8f6f4.block_scale.block32.collector::a::lastuse [%r1], %rd1, %rd2, [%r5], %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::2.kind::mxf8f6f4.block_scale.block32.collector::a::lastuse [%r1], %rd1, %rd2, [%r5], %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::1.kind::mxf8f6f4.block_scale.block32.collector::a::lastuse [%r1], [%r6], %rd2, [%r5], %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::2.kind::mxf8f6f4.block_scale.block32.collector::a::lastuse [%r1], [%r6], %rd2, [%r5], %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::1.kind::mxf8f6f4.block_scale.block32.collector::a::fill [%r1], %rd1, %rd2, [%r5], %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::2.kind::mxf8f6f4.block_scale.block32.collector::a::fill [%r1], %rd1, %rd2, [%r5], %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::1.kind::mxf8f6f4.block_scale.block32.collector::a::fill [%r1], [%r6], %rd2, [%r5], %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::2.kind::mxf8f6f4.block_scale.block32.collector::a::fill [%r1], [%r6], %rd2, [%r5], %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::1.kind::mxf8f6f4.block_scale.block32.collector::a::use [%r1], %rd1, %rd2, [%r5], %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::2.kind::mxf8f6f4.block_scale.block32.collector::a::use [%r1], %rd1, %rd2, [%r5], %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::1.kind::mxf8f6f4.block_scale.block32.collector::a::use [%r1], [%r6], %rd2, [%r5], %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::2.kind::mxf8f6f4.block_scale.block32.collector::a::use [%r1], [%r6], %rd2, [%r5], %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: ret; + call void @llvm.nvvm.tcgen05.mma.sp.shared.mxf8f6f4.block32.block_scale(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 1, i32 0) + + call void @llvm.nvvm.tcgen05.mma.sp.shared.mxf8f6f4.block32.block_scale(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 2, i32 0) + + call void @llvm.nvvm.tcgen05.mma.sp.tensor.mxf8f6f4.block32.block_scale(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 1, i32 0) + + call void @llvm.nvvm.tcgen05.mma.sp.tensor.mxf8f6f4.block32.block_scale(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 2, i32 0) + + call void @llvm.nvvm.tcgen05.mma.sp.shared.mxf8f6f4.block32.block_scale(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 1, i32 1) + + call void @llvm.nvvm.tcgen05.mma.sp.shared.mxf8f6f4.block32.block_scale(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, ptr 
addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 2, i32 1) + + call void @llvm.nvvm.tcgen05.mma.sp.tensor.mxf8f6f4.block32.block_scale(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 1, i32 1) + + call void @llvm.nvvm.tcgen05.mma.sp.tensor.mxf8f6f4.block32.block_scale(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 2, i32 1) + + call void @llvm.nvvm.tcgen05.mma.sp.shared.mxf8f6f4.block32.block_scale(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 1, i32 2) + + call void @llvm.nvvm.tcgen05.mma.sp.shared.mxf8f6f4.block32.block_scale(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 2, i32 2) + + call void @llvm.nvvm.tcgen05.mma.sp.tensor.mxf8f6f4.block32.block_scale(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 1, i32 2) + + call void @llvm.nvvm.tcgen05.mma.sp.tensor.mxf8f6f4.block32.block_scale(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 2, i32 2) + + call void @llvm.nvvm.tcgen05.mma.sp.shared.mxf8f6f4.block32.block_scale(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 1, i32 3) + + call void @llvm.nvvm.tcgen05.mma.sp.shared.mxf8f6f4.block32.block_scale(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 2, i32 3) + + call void @llvm.nvvm.tcgen05.mma.sp.tensor.mxf8f6f4.block32.block_scale(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 1, i32 3) + + call void @llvm.nvvm.tcgen05.mma.sp.tensor.mxf8f6f4.block32.block_scale(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 2, i32 3) + ret void +} + +define void @tcgen05_mma_mxf4(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b) { +; CHECK-LABEL: tcgen05_mma_mxf4( +; CHECK: { +; CHECK-NEXT: .reg .pred %p<2>; +; CHECK-NEXT: .reg .b16 %rs<3>; +; CHECK-NEXT: .reg .b32 %r<6>; +; CHECK-NEXT: .reg .b64 %rd<3>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.b8 %rs1, [tcgen05_mma_mxf4_param_5]; +; CHECK-NEXT: and.b16 %rs2, %rs1, 1; +; CHECK-NEXT: setp.ne.b16 %p1, %rs2, 0; +; CHECK-NEXT: ld.param.b32 %r1, [tcgen05_mma_mxf4_param_0]; +; CHECK-NEXT: ld.param.b64 %rd1, [tcgen05_mma_mxf4_param_2]; +; CHECK-NEXT: ld.param.b64 %rd2, [tcgen05_mma_mxf4_param_3]; +; CHECK-NEXT: ld.param.b32 %r2, [tcgen05_mma_mxf4_param_4]; +; CHECK-NEXT: ld.param.b32 %r3, [tcgen05_mma_mxf4_param_6]; +; CHECK-NEXT: ld.param.b32 
%r4, [tcgen05_mma_mxf4_param_7]; +; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::mxf4.block_scale.block32.collector::a::discard [%r1], %rd1, %rd2, %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: ld.param.b32 %r5, [tcgen05_mma_mxf4_param_1]; +; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::mxf4.block_scale.block32.collector::a::discard [%r1], %rd1, %rd2, %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::mxf4.block_scale.block32.collector::a::discard [%r1], [%r5], %rd2, %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::mxf4.block_scale.block32.collector::a::discard [%r1], [%r5], %rd2, %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::mxf4.block_scale.block32.collector::a::lastuse [%r1], %rd1, %rd2, %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::mxf4.block_scale.block32.collector::a::lastuse [%r1], %rd1, %rd2, %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::mxf4.block_scale.block32.collector::a::lastuse [%r1], [%r5], %rd2, %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::mxf4.block_scale.block32.collector::a::lastuse [%r1], [%r5], %rd2, %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::mxf4.block_scale.block32.collector::a::fill [%r1], %rd1, %rd2, %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::mxf4.block_scale.block32.collector::a::fill [%r1], %rd1, %rd2, %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::mxf4.block_scale.block32.collector::a::fill [%r1], [%r5], %rd2, %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::mxf4.block_scale.block32.collector::a::fill [%r1], [%r5], %rd2, %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::mxf4.block_scale.block32.collector::a::use [%r1], %rd1, %rd2, %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::mxf4.block_scale.block32.collector::a::use [%r1], %rd1, %rd2, %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::mxf4.block_scale.block32.collector::a::use [%r1], [%r5], %rd2, %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::mxf4.block_scale.block32.collector::a::use [%r1], [%r5], %rd2, %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: ret; + call void @llvm.nvvm.tcgen05.mma.shared.mxf4.block32.block_scale(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 1, i32 0) + + call void @llvm.nvvm.tcgen05.mma.shared.mxf4.block32.block_scale(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 2, i32 0) + + call void @llvm.nvvm.tcgen05.mma.tensor.mxf4.block32.block_scale(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 1, i32 0) + + call void @llvm.nvvm.tcgen05.mma.tensor.mxf4.block32.block_scale(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 2, i32 0) + + call void @llvm.nvvm.tcgen05.mma.shared.mxf4.block32.block_scale(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 1, i32 1) + + call void @llvm.nvvm.tcgen05.mma.shared.mxf4.block32.block_scale(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %scale_a, ptr addrspace(6) 
%scale_b, i32 2, i32 1) + + call void @llvm.nvvm.tcgen05.mma.tensor.mxf4.block32.block_scale(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 1, i32 1) + + call void @llvm.nvvm.tcgen05.mma.tensor.mxf4.block32.block_scale(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 2, i32 1) + + call void @llvm.nvvm.tcgen05.mma.shared.mxf4.block32.block_scale(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 1, i32 2) + + call void @llvm.nvvm.tcgen05.mma.shared.mxf4.block32.block_scale(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 2, i32 2) + + call void @llvm.nvvm.tcgen05.mma.tensor.mxf4.block32.block_scale(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 1, i32 2) + + call void @llvm.nvvm.tcgen05.mma.tensor.mxf4.block32.block_scale(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 2, i32 2) + + call void @llvm.nvvm.tcgen05.mma.shared.mxf4.block32.block_scale(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 1, i32 3) + + call void @llvm.nvvm.tcgen05.mma.shared.mxf4.block32.block_scale(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 2, i32 3) + + call void @llvm.nvvm.tcgen05.mma.tensor.mxf4.block32.block_scale(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 1, i32 3) + + call void @llvm.nvvm.tcgen05.mma.tensor.mxf4.block32.block_scale(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 2, i32 3) + ret void +} + +define void @tcgen05_mma_sp_mxf4(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, ptr addrspace(6) %spmetadata) { +; CHECK-LABEL: tcgen05_mma_sp_mxf4( +; CHECK: { +; CHECK-NEXT: .reg .pred %p<2>; +; CHECK-NEXT: .reg .b16 %rs<3>; +; CHECK-NEXT: .reg .b32 %r<7>; +; CHECK-NEXT: .reg .b64 %rd<3>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.b8 %rs1, [tcgen05_mma_sp_mxf4_param_5]; +; CHECK-NEXT: and.b16 %rs2, %rs1, 1; +; CHECK-NEXT: setp.ne.b16 %p1, %rs2, 0; +; CHECK-NEXT: ld.param.b32 %r1, [tcgen05_mma_sp_mxf4_param_0]; +; CHECK-NEXT: ld.param.b64 %rd1, [tcgen05_mma_sp_mxf4_param_2]; +; CHECK-NEXT: ld.param.b64 %rd2, [tcgen05_mma_sp_mxf4_param_3]; +; CHECK-NEXT: ld.param.b32 %r2, [tcgen05_mma_sp_mxf4_param_4]; +; CHECK-NEXT: ld.param.b32 %r3, [tcgen05_mma_sp_mxf4_param_6]; +; CHECK-NEXT: ld.param.b32 %r4, [tcgen05_mma_sp_mxf4_param_7]; +; CHECK-NEXT: ld.param.b32 %r5, [tcgen05_mma_sp_mxf4_param_8]; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::1.kind::mxf4.block_scale.block32.collector::a::discard [%r1], %rd1, %rd2, [%r5], %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: ld.param.b32 %r6, [tcgen05_mma_sp_mxf4_param_1]; +; CHECK-NEXT: 
tcgen05.mma.sp.cta_group::2.kind::mxf4.block_scale.block32.collector::a::discard [%r1], %rd1, %rd2, [%r5], %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::1.kind::mxf4.block_scale.block32.collector::a::discard [%r1], [%r6], %rd2, [%r5], %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::2.kind::mxf4.block_scale.block32.collector::a::discard [%r1], [%r6], %rd2, [%r5], %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::1.kind::mxf4.block_scale.block32.collector::a::lastuse [%r1], %rd1, %rd2, [%r5], %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::2.kind::mxf4.block_scale.block32.collector::a::lastuse [%r1], %rd1, %rd2, [%r5], %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::1.kind::mxf4.block_scale.block32.collector::a::lastuse [%r1], [%r6], %rd2, [%r5], %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::2.kind::mxf4.block_scale.block32.collector::a::lastuse [%r1], [%r6], %rd2, [%r5], %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::1.kind::mxf4.block_scale.block32.collector::a::fill [%r1], %rd1, %rd2, [%r5], %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::2.kind::mxf4.block_scale.block32.collector::a::fill [%r1], %rd1, %rd2, [%r5], %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::1.kind::mxf4.block_scale.block32.collector::a::fill [%r1], [%r6], %rd2, [%r5], %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::2.kind::mxf4.block_scale.block32.collector::a::fill [%r1], [%r6], %rd2, [%r5], %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::1.kind::mxf4.block_scale.block32.collector::a::use [%r1], %rd1, %rd2, [%r5], %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::2.kind::mxf4.block_scale.block32.collector::a::use [%r1], %rd1, %rd2, [%r5], %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::1.kind::mxf4.block_scale.block32.collector::a::use [%r1], [%r6], %rd2, [%r5], %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::2.kind::mxf4.block_scale.block32.collector::a::use [%r1], [%r6], %rd2, [%r5], %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: ret; + call void @llvm.nvvm.tcgen05.mma.sp.shared.mxf4.block32.block_scale(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 1, i32 0) + + call void @llvm.nvvm.tcgen05.mma.sp.shared.mxf4.block32.block_scale(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 2, i32 0) + + call void @llvm.nvvm.tcgen05.mma.sp.tensor.mxf4.block32.block_scale(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 1, i32 0) + + call void @llvm.nvvm.tcgen05.mma.sp.tensor.mxf4.block32.block_scale(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 2, i32 0) + + call void @llvm.nvvm.tcgen05.mma.sp.shared.mxf4.block32.block_scale(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 1, i32 1) + + call void @llvm.nvvm.tcgen05.mma.sp.shared.mxf4.block32.block_scale(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, 
i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 2, i32 1) + + call void @llvm.nvvm.tcgen05.mma.sp.tensor.mxf4.block32.block_scale(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 1, i32 1) + + call void @llvm.nvvm.tcgen05.mma.sp.tensor.mxf4.block32.block_scale(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 2, i32 1) + + call void @llvm.nvvm.tcgen05.mma.sp.shared.mxf4.block32.block_scale(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 1, i32 2) + + call void @llvm.nvvm.tcgen05.mma.sp.shared.mxf4.block32.block_scale(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 2, i32 2) + + call void @llvm.nvvm.tcgen05.mma.sp.tensor.mxf4.block32.block_scale(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 1, i32 2) + + call void @llvm.nvvm.tcgen05.mma.sp.tensor.mxf4.block32.block_scale(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 2, i32 2) + + call void @llvm.nvvm.tcgen05.mma.sp.shared.mxf4.block32.block_scale(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 1, i32 3) + + call void @llvm.nvvm.tcgen05.mma.sp.shared.mxf4.block32.block_scale(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 2, i32 3) + + call void @llvm.nvvm.tcgen05.mma.sp.tensor.mxf4.block32.block_scale(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 1, i32 3) + + call void @llvm.nvvm.tcgen05.mma.sp.tensor.mxf4.block32.block_scale(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 2, i32 3) + ret void +} + +define void @tcgen05_mma_mxf4nvf4(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b) { +; CHECK-LABEL: tcgen05_mma_mxf4nvf4( +; CHECK: { +; CHECK-NEXT: .reg .pred %p<2>; +; CHECK-NEXT: .reg .b16 %rs<3>; +; CHECK-NEXT: .reg .b32 %r<6>; +; CHECK-NEXT: .reg .b64 %rd<3>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.b8 %rs1, [tcgen05_mma_mxf4nvf4_param_5]; +; CHECK-NEXT: and.b16 %rs2, %rs1, 1; +; CHECK-NEXT: setp.ne.b16 %p1, %rs2, 0; +; CHECK-NEXT: ld.param.b32 %r1, [tcgen05_mma_mxf4nvf4_param_0]; +; CHECK-NEXT: ld.param.b64 %rd1, [tcgen05_mma_mxf4nvf4_param_2]; +; CHECK-NEXT: ld.param.b64 %rd2, [tcgen05_mma_mxf4nvf4_param_3]; +; CHECK-NEXT: ld.param.b32 %r2, [tcgen05_mma_mxf4nvf4_param_4]; +; CHECK-NEXT: ld.param.b32 %r3, 
[tcgen05_mma_mxf4nvf4_param_6]; +; CHECK-NEXT: ld.param.b32 %r4, [tcgen05_mma_mxf4nvf4_param_7]; +; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::mxf4nvf4.block_scale.block16.collector::a::discard [%r1], %rd1, %rd2, %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: ld.param.b32 %r5, [tcgen05_mma_mxf4nvf4_param_1]; +; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::mxf4nvf4.block_scale.block16.collector::a::discard [%r1], %rd1, %rd2, %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::mxf4nvf4.block_scale.block16.collector::a::discard [%r1], [%r5], %rd2, %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::mxf4nvf4.block_scale.block16.collector::a::discard [%r1], [%r5], %rd2, %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::mxf4nvf4.block_scale.block32.collector::a::discard [%r1], %rd1, %rd2, %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::mxf4nvf4.block_scale.block32.collector::a::discard [%r1], %rd1, %rd2, %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::mxf4nvf4.block_scale.block32.collector::a::discard [%r1], [%r5], %rd2, %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::mxf4nvf4.block_scale.block32.collector::a::discard [%r1], [%r5], %rd2, %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::mxf4nvf4.block_scale.block16.collector::a::lastuse [%r1], %rd1, %rd2, %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::mxf4nvf4.block_scale.block16.collector::a::lastuse [%r1], %rd1, %rd2, %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::mxf4nvf4.block_scale.block16.collector::a::lastuse [%r1], [%r5], %rd2, %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::mxf4nvf4.block_scale.block16.collector::a::lastuse [%r1], [%r5], %rd2, %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::mxf4nvf4.block_scale.block32.collector::a::lastuse [%r1], %rd1, %rd2, %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::mxf4nvf4.block_scale.block32.collector::a::lastuse [%r1], %rd1, %rd2, %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::mxf4nvf4.block_scale.block32.collector::a::lastuse [%r1], [%r5], %rd2, %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::mxf4nvf4.block_scale.block32.collector::a::lastuse [%r1], [%r5], %rd2, %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::mxf4nvf4.block_scale.block16.collector::a::fill [%r1], %rd1, %rd2, %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::mxf4nvf4.block_scale.block16.collector::a::fill [%r1], %rd1, %rd2, %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::mxf4nvf4.block_scale.block16.collector::a::fill [%r1], [%r5], %rd2, %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::mxf4nvf4.block_scale.block16.collector::a::fill [%r1], [%r5], %rd2, %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::mxf4nvf4.block_scale.block32.collector::a::fill [%r1], %rd1, %rd2, %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::mxf4nvf4.block_scale.block32.collector::a::fill [%r1], %rd1, %rd2, %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::mxf4nvf4.block_scale.block32.collector::a::fill [%r1], [%r5], %rd2, %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::mxf4nvf4.block_scale.block32.collector::a::fill [%r1], [%r5], %rd2, %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: 
tcgen05.mma.cta_group::1.kind::mxf4nvf4.block_scale.block16.collector::a::use [%r1], %rd1, %rd2, %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::mxf4nvf4.block_scale.block16.collector::a::use [%r1], %rd1, %rd2, %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::mxf4nvf4.block_scale.block16.collector::a::use [%r1], [%r5], %rd2, %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::mxf4nvf4.block_scale.block16.collector::a::use [%r1], [%r5], %rd2, %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::mxf4nvf4.block_scale.block32.collector::a::use [%r1], %rd1, %rd2, %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::mxf4nvf4.block_scale.block32.collector::a::use [%r1], %rd1, %rd2, %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::mxf4nvf4.block_scale.block32.collector::a::use [%r1], [%r5], %rd2, %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::mxf4nvf4.block_scale.block32.collector::a::use [%r1], [%r5], %rd2, %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: ret; + call void @llvm.nvvm.tcgen05.mma.shared.mxf4nvf4.block16.block_scale(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 1, i32 0) + + call void @llvm.nvvm.tcgen05.mma.shared.mxf4nvf4.block16.block_scale(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 2, i32 0) + + call void @llvm.nvvm.tcgen05.mma.tensor.mxf4nvf4.block16.block_scale(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 1, i32 0) + + call void @llvm.nvvm.tcgen05.mma.tensor.mxf4nvf4.block16.block_scale(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 2, i32 0) + + call void @llvm.nvvm.tcgen05.mma.shared.mxf4nvf4.block32.block_scale(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 1, i32 0) + + call void @llvm.nvvm.tcgen05.mma.shared.mxf4nvf4.block32.block_scale(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 2, i32 0) + + call void @llvm.nvvm.tcgen05.mma.tensor.mxf4nvf4.block32.block_scale(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 1, i32 0) + + call void @llvm.nvvm.tcgen05.mma.tensor.mxf4nvf4.block32.block_scale(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 2, i32 0) + + call void @llvm.nvvm.tcgen05.mma.shared.mxf4nvf4.block16.block_scale(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 1, i32 1) + + call void @llvm.nvvm.tcgen05.mma.shared.mxf4nvf4.block16.block_scale(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 2, i32 1) + + call void @llvm.nvvm.tcgen05.mma.tensor.mxf4nvf4.block16.block_scale(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 1, i32 
1) + + call void @llvm.nvvm.tcgen05.mma.tensor.mxf4nvf4.block16.block_scale(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 2, i32 1) + + call void @llvm.nvvm.tcgen05.mma.shared.mxf4nvf4.block32.block_scale(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 1, i32 1) + + call void @llvm.nvvm.tcgen05.mma.shared.mxf4nvf4.block32.block_scale(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 2, i32 1) + + call void @llvm.nvvm.tcgen05.mma.tensor.mxf4nvf4.block32.block_scale(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 1, i32 1) + + call void @llvm.nvvm.tcgen05.mma.tensor.mxf4nvf4.block32.block_scale(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 2, i32 1) + + call void @llvm.nvvm.tcgen05.mma.shared.mxf4nvf4.block16.block_scale(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 1, i32 2) + + call void @llvm.nvvm.tcgen05.mma.shared.mxf4nvf4.block16.block_scale(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 2, i32 2) + + call void @llvm.nvvm.tcgen05.mma.tensor.mxf4nvf4.block16.block_scale(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 1, i32 2) + + call void @llvm.nvvm.tcgen05.mma.tensor.mxf4nvf4.block16.block_scale(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 2, i32 2) + + call void @llvm.nvvm.tcgen05.mma.shared.mxf4nvf4.block32.block_scale(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 1, i32 2) + + call void @llvm.nvvm.tcgen05.mma.shared.mxf4nvf4.block32.block_scale(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 2, i32 2) + + call void @llvm.nvvm.tcgen05.mma.tensor.mxf4nvf4.block32.block_scale(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 1, i32 2) + + call void @llvm.nvvm.tcgen05.mma.tensor.mxf4nvf4.block32.block_scale(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 2, i32 2) + + call void @llvm.nvvm.tcgen05.mma.shared.mxf4nvf4.block16.block_scale(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 1, i32 3) + + call void @llvm.nvvm.tcgen05.mma.shared.mxf4nvf4.block16.block_scale(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 2, i32 3) + + call void @llvm.nvvm.tcgen05.mma.tensor.mxf4nvf4.block16.block_scale(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %scale_a, 
ptr addrspace(6) %scale_b, i32 1, i32 3)
+
+ call void @llvm.nvvm.tcgen05.mma.tensor.mxf4nvf4.block16.block_scale(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 2, i32 3)
+
+ call void @llvm.nvvm.tcgen05.mma.shared.mxf4nvf4.block32.block_scale(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 1, i32 3)
+
+ call void @llvm.nvvm.tcgen05.mma.shared.mxf4nvf4.block32.block_scale(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 2, i32 3)
+
+ call void @llvm.nvvm.tcgen05.mma.tensor.mxf4nvf4.block32.block_scale(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 1, i32 3)
+
+ call void @llvm.nvvm.tcgen05.mma.tensor.mxf4nvf4.block32.block_scale(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 2, i32 3)
+
+ ret void
+}
+
+define void @tcgen05_mma_sp_mxf4nvf4(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, ptr addrspace(6) %spmetadata) {
+; CHECK-LABEL: tcgen05_mma_sp_mxf4nvf4(
+; CHECK: {
+; CHECK-NEXT: .reg .pred %p<2>;
+; CHECK-NEXT: .reg .b16 %rs<3>;
+; CHECK-NEXT: .reg .b32 %r<7>;
+; CHECK-NEXT: .reg .b64 %rd<3>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: ld.param.b8 %rs1, [tcgen05_mma_sp_mxf4nvf4_param_5];
+; CHECK-NEXT: and.b16 %rs2, %rs1, 1;
+; CHECK-NEXT: setp.ne.b16 %p1, %rs2, 0;
+; CHECK-NEXT: ld.param.b32 %r1, [tcgen05_mma_sp_mxf4nvf4_param_0];
+; CHECK-NEXT: ld.param.b64 %rd1, [tcgen05_mma_sp_mxf4nvf4_param_2];
+; CHECK-NEXT: ld.param.b64 %rd2, [tcgen05_mma_sp_mxf4nvf4_param_3];
+; CHECK-NEXT: ld.param.b32 %r2, [tcgen05_mma_sp_mxf4nvf4_param_4];
+; CHECK-NEXT: ld.param.b32 %r3, [tcgen05_mma_sp_mxf4nvf4_param_6];
+; CHECK-NEXT: ld.param.b32 %r4, [tcgen05_mma_sp_mxf4nvf4_param_7];
+; CHECK-NEXT: ld.param.b32 %r5, [tcgen05_mma_sp_mxf4nvf4_param_8];
+; CHECK-NEXT: tcgen05.mma.sp.cta_group::1.kind::mxf4nvf4.block_scale.block16.collector::a::discard [%r1], %rd1, %rd2, [%r5], %r2, [%r3], [%r4], %p1;
+; CHECK-NEXT: ld.param.b32 %r6, [tcgen05_mma_sp_mxf4nvf4_param_1];
+; CHECK-NEXT: tcgen05.mma.sp.cta_group::2.kind::mxf4nvf4.block_scale.block16.collector::a::discard [%r1], %rd1, %rd2, [%r5], %r2, [%r3], [%r4], %p1;
+; CHECK-NEXT: tcgen05.mma.sp.cta_group::1.kind::mxf4nvf4.block_scale.block16.collector::a::discard [%r1], [%r6], %rd2, [%r5], %r2, [%r3], [%r4], %p1;
+; CHECK-NEXT: tcgen05.mma.sp.cta_group::2.kind::mxf4nvf4.block_scale.block16.collector::a::discard [%r1], [%r6], %rd2, [%r5], %r2, [%r3], [%r4], %p1;
+; CHECK-NEXT: tcgen05.mma.sp.cta_group::1.kind::mxf4nvf4.block_scale.block32.collector::a::discard [%r1], %rd1, %rd2, [%r5], %r2, [%r3], [%r4], %p1;
+; CHECK-NEXT: tcgen05.mma.sp.cta_group::2.kind::mxf4nvf4.block_scale.block32.collector::a::discard [%r1], %rd1, %rd2, [%r5], %r2, [%r3], [%r4], %p1;
+; CHECK-NEXT: tcgen05.mma.sp.cta_group::1.kind::mxf4nvf4.block_scale.block32.collector::a::discard [%r1], [%r6], %rd2, [%r5], %r2, [%r3], [%r4], %p1;
+; CHECK-NEXT: tcgen05.mma.sp.cta_group::2.kind::mxf4nvf4.block_scale.block32.collector::a::discard [%r1], [%r6], %rd2, [%r5], %r2, [%r3], [%r4], %p1;
+; CHECK-NEXT:
tcgen05.mma.sp.cta_group::1.kind::mxf4nvf4.block_scale.block16.collector::a::lastuse [%r1], %rd1, %rd2, [%r5], %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::2.kind::mxf4nvf4.block_scale.block16.collector::a::lastuse [%r1], %rd1, %rd2, [%r5], %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::1.kind::mxf4nvf4.block_scale.block16.collector::a::lastuse [%r1], [%r6], %rd2, [%r5], %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::2.kind::mxf4nvf4.block_scale.block16.collector::a::lastuse [%r1], [%r6], %rd2, [%r5], %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::1.kind::mxf4nvf4.block_scale.block32.collector::a::lastuse [%r1], %rd1, %rd2, [%r5], %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::2.kind::mxf4nvf4.block_scale.block32.collector::a::lastuse [%r1], %rd1, %rd2, [%r5], %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::1.kind::mxf4nvf4.block_scale.block32.collector::a::lastuse [%r1], [%r6], %rd2, [%r5], %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::2.kind::mxf4nvf4.block_scale.block32.collector::a::lastuse [%r1], [%r6], %rd2, [%r5], %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::1.kind::mxf4nvf4.block_scale.block16.collector::a::fill [%r1], %rd1, %rd2, [%r5], %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::2.kind::mxf4nvf4.block_scale.block16.collector::a::fill [%r1], %rd1, %rd2, [%r5], %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::1.kind::mxf4nvf4.block_scale.block16.collector::a::fill [%r1], [%r6], %rd2, [%r5], %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::2.kind::mxf4nvf4.block_scale.block16.collector::a::fill [%r1], [%r6], %rd2, [%r5], %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::1.kind::mxf4nvf4.block_scale.block32.collector::a::fill [%r1], %rd1, %rd2, [%r5], %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::2.kind::mxf4nvf4.block_scale.block32.collector::a::fill [%r1], %rd1, %rd2, [%r5], %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::1.kind::mxf4nvf4.block_scale.block32.collector::a::fill [%r1], [%r6], %rd2, [%r5], %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::2.kind::mxf4nvf4.block_scale.block32.collector::a::fill [%r1], [%r6], %rd2, [%r5], %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::1.kind::mxf4nvf4.block_scale.block16.collector::a::use [%r1], %rd1, %rd2, [%r5], %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::2.kind::mxf4nvf4.block_scale.block16.collector::a::use [%r1], %rd1, %rd2, [%r5], %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::1.kind::mxf4nvf4.block_scale.block16.collector::a::use [%r1], [%r6], %rd2, [%r5], %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::2.kind::mxf4nvf4.block_scale.block16.collector::a::use [%r1], [%r6], %rd2, [%r5], %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::1.kind::mxf4nvf4.block_scale.block32.collector::a::use [%r1], %rd1, %rd2, [%r5], %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::2.kind::mxf4nvf4.block_scale.block32.collector::a::use [%r1], %rd1, %rd2, [%r5], %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::1.kind::mxf4nvf4.block_scale.block32.collector::a::use [%r1], [%r6], %rd2, [%r5], %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::2.kind::mxf4nvf4.block_scale.block32.collector::a::use [%r1], [%r6], %rd2, [%r5], %r2, [%r3], [%r4], %p1; +; 
CHECK-NEXT: ret; + call void @llvm.nvvm.tcgen05.mma.sp.shared.mxf4nvf4.block16.block_scale(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 1, i32 0) + + call void @llvm.nvvm.tcgen05.mma.sp.shared.mxf4nvf4.block16.block_scale(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 2, i32 0) + + call void @llvm.nvvm.tcgen05.mma.sp.tensor.mxf4nvf4.block16.block_scale(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 1, i32 0) + + call void @llvm.nvvm.tcgen05.mma.sp.tensor.mxf4nvf4.block16.block_scale(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 2, i32 0) + + call void @llvm.nvvm.tcgen05.mma.sp.shared.mxf4nvf4.block32.block_scale(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 1, i32 0) + + call void @llvm.nvvm.tcgen05.mma.sp.shared.mxf4nvf4.block32.block_scale(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 2, i32 0) + + call void @llvm.nvvm.tcgen05.mma.sp.tensor.mxf4nvf4.block32.block_scale(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 1, i32 0) + + call void @llvm.nvvm.tcgen05.mma.sp.tensor.mxf4nvf4.block32.block_scale(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 2, i32 0) + + call void @llvm.nvvm.tcgen05.mma.sp.shared.mxf4nvf4.block16.block_scale(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 1, i32 1) + + call void @llvm.nvvm.tcgen05.mma.sp.shared.mxf4nvf4.block16.block_scale(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 2, i32 1) + + call void @llvm.nvvm.tcgen05.mma.sp.tensor.mxf4nvf4.block16.block_scale(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 1, i32 1) + + call void @llvm.nvvm.tcgen05.mma.sp.tensor.mxf4nvf4.block16.block_scale(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 2, i32 1) + + call void @llvm.nvvm.tcgen05.mma.sp.shared.mxf4nvf4.block32.block_scale(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 1, i32 1) + + call void @llvm.nvvm.tcgen05.mma.sp.shared.mxf4nvf4.block32.block_scale(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, ptr 
addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 2, i32 1) + + call void @llvm.nvvm.tcgen05.mma.sp.tensor.mxf4nvf4.block32.block_scale(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 1, i32 1) + + call void @llvm.nvvm.tcgen05.mma.sp.tensor.mxf4nvf4.block32.block_scale(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 2, i32 1) + + call void @llvm.nvvm.tcgen05.mma.sp.shared.mxf4nvf4.block16.block_scale(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 1, i32 2) + + call void @llvm.nvvm.tcgen05.mma.sp.shared.mxf4nvf4.block16.block_scale(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 2, i32 2) + + call void @llvm.nvvm.tcgen05.mma.sp.tensor.mxf4nvf4.block16.block_scale(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 1, i32 2) + + call void @llvm.nvvm.tcgen05.mma.sp.tensor.mxf4nvf4.block16.block_scale(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 2, i32 2) + + call void @llvm.nvvm.tcgen05.mma.sp.shared.mxf4nvf4.block32.block_scale(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 1, i32 2) + + call void @llvm.nvvm.tcgen05.mma.sp.shared.mxf4nvf4.block32.block_scale(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 2, i32 2) + + call void @llvm.nvvm.tcgen05.mma.sp.tensor.mxf4nvf4.block32.block_scale(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 1, i32 2) + + call void @llvm.nvvm.tcgen05.mma.sp.tensor.mxf4nvf4.block32.block_scale(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 2, i32 2) + + call void @llvm.nvvm.tcgen05.mma.sp.shared.mxf4nvf4.block16.block_scale(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 1, i32 3) + + call void @llvm.nvvm.tcgen05.mma.sp.shared.mxf4nvf4.block16.block_scale(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 2, i32 3) + + call void @llvm.nvvm.tcgen05.mma.sp.tensor.mxf4nvf4.block16.block_scale(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 1, i32 3) + + call void @llvm.nvvm.tcgen05.mma.sp.tensor.mxf4nvf4.block16.block_scale(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, 
i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 2, i32 3)
+
+ call void @llvm.nvvm.tcgen05.mma.sp.shared.mxf4nvf4.block32.block_scale(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 1, i32 3)
+
+ call void @llvm.nvvm.tcgen05.mma.sp.shared.mxf4nvf4.block32.block_scale(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 2, i32 3)
+
+ call void @llvm.nvvm.tcgen05.mma.sp.tensor.mxf4nvf4.block32.block_scale(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 1, i32 3)
+
+ call void @llvm.nvvm.tcgen05.mma.sp.tensor.mxf4nvf4.block32.block_scale(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 2, i32 3)
+
+ ret void
+}
diff --git a/llvm/test/CodeGen/NVPTX/tcgen05-mma-block-scale.ll b/llvm/test/CodeGen/NVPTX/tcgen05-mma-block-scale.ll
new file mode 100644
index 0000000000000..18511989753cb
--- /dev/null
+++ b/llvm/test/CodeGen/NVPTX/tcgen05-mma-block-scale.ll
@@ -0,0 +1,291 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc < %s -o - -mcpu=sm_100a -march=nvptx64 -mattr=+ptx86 | FileCheck %s
+; RUN: llc < %s -o - -mcpu=sm_101a -march=nvptx64 -mattr=+ptx86 | FileCheck %s
+; RUN: %if ptxas-12.8 %{ llc < %s -march=nvptx64 -mattr=+ptx86 -mcpu=sm_100a | %ptxas-verify -arch=sm_100a %}
+; RUN: %if ptxas-12.8 %{ llc < %s -march=nvptx64 -mattr=+ptx86 -mcpu=sm_101a | %ptxas-verify -arch=sm_101a %}
+
+define void @tcgen05_mma_mxf8f6f4(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b) {
+; CHECK-LABEL: tcgen05_mma_mxf8f6f4(
+; CHECK: {
+; CHECK-NEXT: .reg .pred %p<2>;
+; CHECK-NEXT: .reg .b16 %rs<3>;
+; CHECK-NEXT: .reg .b32 %r<6>;
+; CHECK-NEXT: .reg .b64 %rd<3>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: ld.param.b8 %rs1, [tcgen05_mma_mxf8f6f4_param_5];
+; CHECK-NEXT: and.b16 %rs2, %rs1, 1;
+; CHECK-NEXT: setp.ne.b16 %p1, %rs2, 0;
+; CHECK-NEXT: ld.param.b32 %r1, [tcgen05_mma_mxf8f6f4_param_0];
+; CHECK-NEXT: ld.param.b64 %rd1, [tcgen05_mma_mxf8f6f4_param_2];
+; CHECK-NEXT: ld.param.b64 %rd2, [tcgen05_mma_mxf8f6f4_param_3];
+; CHECK-NEXT: ld.param.b32 %r2, [tcgen05_mma_mxf8f6f4_param_4];
+; CHECK-NEXT: ld.param.b32 %r3, [tcgen05_mma_mxf8f6f4_param_6];
+; CHECK-NEXT: ld.param.b32 %r4, [tcgen05_mma_mxf8f6f4_param_7];
+; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::mxf8f6f4.block_scale.collector::a::discard [%r1], %rd1, %rd2, %r2, [%r3], [%r4], %p1;
+; CHECK-NEXT: ld.param.b32 %r5, [tcgen05_mma_mxf8f6f4_param_1];
+; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::mxf8f6f4.block_scale.collector::a::discard [%r1], %rd1, %rd2, %r2, [%r3], [%r4], %p1;
+; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::mxf8f6f4.block_scale.collector::a::discard [%r1], [%r5], %rd2, %r2, [%r3], [%r4], %p1;
+; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::mxf8f6f4.block_scale.collector::a::discard [%r1], [%r5], %rd2, %r2, [%r3], [%r4], %p1;
+; CHECK-NEXT:
tcgen05.mma.cta_group::1.kind::mxf8f6f4.block_scale.collector::a::lastuse [%r1], %rd1, %rd2, %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::mxf8f6f4.block_scale.collector::a::lastuse [%r1], %rd1, %rd2, %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::mxf8f6f4.block_scale.collector::a::lastuse [%r1], [%r5], %rd2, %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::mxf8f6f4.block_scale.collector::a::lastuse [%r1], [%r5], %rd2, %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::mxf8f6f4.block_scale.collector::a::fill [%r1], %rd1, %rd2, %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::mxf8f6f4.block_scale.collector::a::fill [%r1], %rd1, %rd2, %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::mxf8f6f4.block_scale.collector::a::fill [%r1], [%r5], %rd2, %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::mxf8f6f4.block_scale.collector::a::fill [%r1], [%r5], %rd2, %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::mxf8f6f4.block_scale.collector::a::use [%r1], %rd1, %rd2, %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::mxf8f6f4.block_scale.collector::a::use [%r1], %rd1, %rd2, %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::mxf8f6f4.block_scale.collector::a::use [%r1], [%r5], %rd2, %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::mxf8f6f4.block_scale.collector::a::use [%r1], [%r5], %rd2, %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: ret; + call void @llvm.nvvm.tcgen05.mma.shared.mxf8f6f4.block_scale(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 1, i32 0) + + call void @llvm.nvvm.tcgen05.mma.shared.mxf8f6f4.block_scale(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 2, i32 0) + + call void @llvm.nvvm.tcgen05.mma.tensor.mxf8f6f4.block_scale(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 1, i32 0) + + call void @llvm.nvvm.tcgen05.mma.tensor.mxf8f6f4.block_scale(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 2, i32 0) + + call void @llvm.nvvm.tcgen05.mma.shared.mxf8f6f4.block_scale(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 1, i32 1) + + call void @llvm.nvvm.tcgen05.mma.shared.mxf8f6f4.block_scale(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 2, i32 1) + + call void @llvm.nvvm.tcgen05.mma.tensor.mxf8f6f4.block_scale(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 1, i32 1) + + call void @llvm.nvvm.tcgen05.mma.tensor.mxf8f6f4.block_scale(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 2, i32 1) + + call void @llvm.nvvm.tcgen05.mma.shared.mxf8f6f4.block_scale(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 1, i32 2) + + call void 
@llvm.nvvm.tcgen05.mma.shared.mxf8f6f4.block_scale(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 2, i32 2) + + call void @llvm.nvvm.tcgen05.mma.tensor.mxf8f6f4.block_scale(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 1, i32 2) + + call void @llvm.nvvm.tcgen05.mma.tensor.mxf8f6f4.block_scale(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 2, i32 2) + + call void @llvm.nvvm.tcgen05.mma.shared.mxf8f6f4.block_scale(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 1, i32 3) + + call void @llvm.nvvm.tcgen05.mma.shared.mxf8f6f4.block_scale(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 2, i32 3) + + call void @llvm.nvvm.tcgen05.mma.tensor.mxf8f6f4.block_scale(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 1, i32 3) + + call void @llvm.nvvm.tcgen05.mma.tensor.mxf8f6f4.block_scale(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 2, i32 3) + + ret void +} + +define void @tcgen05_mma_sp_mxf8f6f4(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, ptr addrspace(6) %spmetadata) { +; CHECK-LABEL: tcgen05_mma_sp_mxf8f6f4( +; CHECK: { +; CHECK-NEXT: .reg .pred %p<2>; +; CHECK-NEXT: .reg .b16 %rs<3>; +; CHECK-NEXT: .reg .b32 %r<7>; +; CHECK-NEXT: .reg .b64 %rd<3>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.b8 %rs1, [tcgen05_mma_sp_mxf8f6f4_param_5]; +; CHECK-NEXT: and.b16 %rs2, %rs1, 1; +; CHECK-NEXT: setp.ne.b16 %p1, %rs2, 0; +; CHECK-NEXT: ld.param.b32 %r1, [tcgen05_mma_sp_mxf8f6f4_param_0]; +; CHECK-NEXT: ld.param.b64 %rd1, [tcgen05_mma_sp_mxf8f6f4_param_2]; +; CHECK-NEXT: ld.param.b64 %rd2, [tcgen05_mma_sp_mxf8f6f4_param_3]; +; CHECK-NEXT: ld.param.b32 %r2, [tcgen05_mma_sp_mxf8f6f4_param_4]; +; CHECK-NEXT: ld.param.b32 %r3, [tcgen05_mma_sp_mxf8f6f4_param_6]; +; CHECK-NEXT: ld.param.b32 %r4, [tcgen05_mma_sp_mxf8f6f4_param_7]; +; CHECK-NEXT: ld.param.b32 %r5, [tcgen05_mma_sp_mxf8f6f4_param_8]; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::1.kind::mxf8f6f4.block_scale.collector::a::discard [%r1], %rd1, %rd2, [%r5], %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: ld.param.b32 %r6, [tcgen05_mma_sp_mxf8f6f4_param_1]; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::2.kind::mxf8f6f4.block_scale.collector::a::discard [%r1], %rd1, %rd2, [%r5], %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::1.kind::mxf8f6f4.block_scale.collector::a::discard [%r1], [%r6], %rd2, [%r5], %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::2.kind::mxf8f6f4.block_scale.collector::a::discard [%r1], [%r6], %rd2, [%r5], %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::1.kind::mxf8f6f4.block_scale.collector::a::lastuse [%r1], %rd1, %rd2, [%r5], %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::2.kind::mxf8f6f4.block_scale.collector::a::lastuse [%r1], %rd1, %rd2, [%r5], %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: 
tcgen05.mma.sp.cta_group::1.kind::mxf8f6f4.block_scale.collector::a::lastuse [%r1], [%r6], %rd2, [%r5], %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::2.kind::mxf8f6f4.block_scale.collector::a::lastuse [%r1], [%r6], %rd2, [%r5], %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::1.kind::mxf8f6f4.block_scale.collector::a::fill [%r1], %rd1, %rd2, [%r5], %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::2.kind::mxf8f6f4.block_scale.collector::a::fill [%r1], %rd1, %rd2, [%r5], %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::1.kind::mxf8f6f4.block_scale.collector::a::fill [%r1], [%r6], %rd2, [%r5], %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::2.kind::mxf8f6f4.block_scale.collector::a::fill [%r1], [%r6], %rd2, [%r5], %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::1.kind::mxf8f6f4.block_scale.collector::a::use [%r1], %rd1, %rd2, [%r5], %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::2.kind::mxf8f6f4.block_scale.collector::a::use [%r1], %rd1, %rd2, [%r5], %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::1.kind::mxf8f6f4.block_scale.collector::a::use [%r1], [%r6], %rd2, [%r5], %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::2.kind::mxf8f6f4.block_scale.collector::a::use [%r1], [%r6], %rd2, [%r5], %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: ret; + call void @llvm.nvvm.tcgen05.mma.sp.shared.mxf8f6f4.block_scale(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 1, i32 0) + + call void @llvm.nvvm.tcgen05.mma.sp.shared.mxf8f6f4.block_scale(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 2, i32 0) + + call void @llvm.nvvm.tcgen05.mma.sp.tensor.mxf8f6f4.block_scale(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 1, i32 0) + + call void @llvm.nvvm.tcgen05.mma.sp.tensor.mxf8f6f4.block_scale(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 2, i32 0) + + call void @llvm.nvvm.tcgen05.mma.sp.shared.mxf8f6f4.block_scale(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 1, i32 1) + + call void @llvm.nvvm.tcgen05.mma.sp.shared.mxf8f6f4.block_scale(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 2, i32 1) + + call void @llvm.nvvm.tcgen05.mma.sp.tensor.mxf8f6f4.block_scale(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 1, i32 1) + + call void @llvm.nvvm.tcgen05.mma.sp.tensor.mxf8f6f4.block_scale(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 2, i32 1) + + call void @llvm.nvvm.tcgen05.mma.sp.shared.mxf8f6f4.block_scale(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr 
addrspace(6) %spmetadata, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 1, i32 2) + + call void @llvm.nvvm.tcgen05.mma.sp.shared.mxf8f6f4.block_scale(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 2, i32 2) + + call void @llvm.nvvm.tcgen05.mma.sp.tensor.mxf8f6f4.block_scale(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 1, i32 2) + + call void @llvm.nvvm.tcgen05.mma.sp.tensor.mxf8f6f4.block_scale(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 2, i32 2) + + call void @llvm.nvvm.tcgen05.mma.sp.shared.mxf8f6f4.block_scale(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 1, i32 3) + + call void @llvm.nvvm.tcgen05.mma.sp.shared.mxf8f6f4.block_scale(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 2, i32 3) + + call void @llvm.nvvm.tcgen05.mma.sp.tensor.mxf8f6f4.block_scale(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 1, i32 3) + + call void @llvm.nvvm.tcgen05.mma.sp.tensor.mxf8f6f4.block_scale(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 2, i32 3) + + ret void +} + +define void @tcgen05_mma_mxf4(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b) { +; CHECK-LABEL: tcgen05_mma_mxf4( +; CHECK: { +; CHECK-NEXT: .reg .pred %p<2>; +; CHECK-NEXT: .reg .b16 %rs<3>; +; CHECK-NEXT: .reg .b32 %r<6>; +; CHECK-NEXT: .reg .b64 %rd<3>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.b8 %rs1, [tcgen05_mma_mxf4_param_5]; +; CHECK-NEXT: and.b16 %rs2, %rs1, 1; +; CHECK-NEXT: setp.ne.b16 %p1, %rs2, 0; +; CHECK-NEXT: ld.param.b32 %r1, [tcgen05_mma_mxf4_param_0]; +; CHECK-NEXT: ld.param.b64 %rd1, [tcgen05_mma_mxf4_param_2]; +; CHECK-NEXT: ld.param.b64 %rd2, [tcgen05_mma_mxf4_param_3]; +; CHECK-NEXT: ld.param.b32 %r2, [tcgen05_mma_mxf4_param_4]; +; CHECK-NEXT: ld.param.b32 %r3, [tcgen05_mma_mxf4_param_6]; +; CHECK-NEXT: ld.param.b32 %r4, [tcgen05_mma_mxf4_param_7]; +; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::mxf4.block_scale.collector::a::discard [%r1], %rd1, %rd2, %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: ld.param.b32 %r5, [tcgen05_mma_mxf4_param_1]; +; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::mxf4.block_scale.collector::a::discard [%r1], %rd1, %rd2, %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::mxf4.block_scale.collector::a::discard [%r1], [%r5], %rd2, %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::mxf4.block_scale.collector::a::discard [%r1], [%r5], %rd2, %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::mxf4.block_scale.collector::a::lastuse [%r1], %rd1, %rd2, %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: 
tcgen05.mma.cta_group::2.kind::mxf4.block_scale.collector::a::lastuse [%r1], %rd1, %rd2, %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::mxf4.block_scale.collector::a::lastuse [%r1], [%r5], %rd2, %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::mxf4.block_scale.collector::a::lastuse [%r1], [%r5], %rd2, %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::mxf4.block_scale.collector::a::fill [%r1], %rd1, %rd2, %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::mxf4.block_scale.collector::a::fill [%r1], %rd1, %rd2, %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::mxf4.block_scale.collector::a::fill [%r1], [%r5], %rd2, %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::mxf4.block_scale.collector::a::fill [%r1], [%r5], %rd2, %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::mxf4.block_scale.collector::a::use [%r1], %rd1, %rd2, %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::mxf4.block_scale.collector::a::use [%r1], %rd1, %rd2, %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::mxf4.block_scale.collector::a::use [%r1], [%r5], %rd2, %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::mxf4.block_scale.collector::a::use [%r1], [%r5], %rd2, %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: ret; + call void @llvm.nvvm.tcgen05.mma.shared.mxf4.block_scale(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 1, i32 0) + + call void @llvm.nvvm.tcgen05.mma.shared.mxf4.block_scale(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 2, i32 0) + + call void @llvm.nvvm.tcgen05.mma.tensor.mxf4.block_scale(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 1, i32 0) + + call void @llvm.nvvm.tcgen05.mma.tensor.mxf4.block_scale(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 2, i32 0) + + call void @llvm.nvvm.tcgen05.mma.shared.mxf4.block_scale(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 1, i32 1) + + call void @llvm.nvvm.tcgen05.mma.shared.mxf4.block_scale(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 2, i32 1) + + call void @llvm.nvvm.tcgen05.mma.tensor.mxf4.block_scale(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 1, i32 1) + + call void @llvm.nvvm.tcgen05.mma.tensor.mxf4.block_scale(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 2, i32 1) + + call void @llvm.nvvm.tcgen05.mma.shared.mxf4.block_scale(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 1, i32 2) + + call void @llvm.nvvm.tcgen05.mma.shared.mxf4.block_scale(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 2, i32 2) + + call void 
@llvm.nvvm.tcgen05.mma.tensor.mxf4.block_scale(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 1, i32 2) + + call void @llvm.nvvm.tcgen05.mma.tensor.mxf4.block_scale(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 2, i32 2) + + call void @llvm.nvvm.tcgen05.mma.shared.mxf4.block_scale(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 1, i32 3) + + call void @llvm.nvvm.tcgen05.mma.shared.mxf4.block_scale(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 2, i32 3) + + call void @llvm.nvvm.tcgen05.mma.tensor.mxf4.block_scale(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 1, i32 3) + + call void @llvm.nvvm.tcgen05.mma.tensor.mxf4.block_scale(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 2, i32 3) + + ret void +} + +define void @tcgen05_mma_sp_mxf4(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, ptr addrspace(6) %spmetadata) { +; CHECK-LABEL: tcgen05_mma_sp_mxf4( +; CHECK: { +; CHECK-NEXT: .reg .pred %p<2>; +; CHECK-NEXT: .reg .b16 %rs<3>; +; CHECK-NEXT: .reg .b32 %r<7>; +; CHECK-NEXT: .reg .b64 %rd<3>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.b8 %rs1, [tcgen05_mma_sp_mxf4_param_5]; +; CHECK-NEXT: and.b16 %rs2, %rs1, 1; +; CHECK-NEXT: setp.ne.b16 %p1, %rs2, 0; +; CHECK-NEXT: ld.param.b32 %r1, [tcgen05_mma_sp_mxf4_param_0]; +; CHECK-NEXT: ld.param.b64 %rd1, [tcgen05_mma_sp_mxf4_param_2]; +; CHECK-NEXT: ld.param.b64 %rd2, [tcgen05_mma_sp_mxf4_param_3]; +; CHECK-NEXT: ld.param.b32 %r2, [tcgen05_mma_sp_mxf4_param_4]; +; CHECK-NEXT: ld.param.b32 %r3, [tcgen05_mma_sp_mxf4_param_6]; +; CHECK-NEXT: ld.param.b32 %r4, [tcgen05_mma_sp_mxf4_param_7]; +; CHECK-NEXT: ld.param.b32 %r5, [tcgen05_mma_sp_mxf4_param_8]; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::1.kind::mxf4.block_scale.collector::a::discard [%r1], %rd1, %rd2, [%r5], %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: ld.param.b32 %r6, [tcgen05_mma_sp_mxf4_param_1]; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::2.kind::mxf4.block_scale.collector::a::discard [%r1], %rd1, %rd2, [%r5], %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::1.kind::mxf4.block_scale.collector::a::discard [%r1], [%r6], %rd2, [%r5], %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::2.kind::mxf4.block_scale.collector::a::discard [%r1], [%r6], %rd2, [%r5], %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::1.kind::mxf4.block_scale.collector::a::lastuse [%r1], %rd1, %rd2, [%r5], %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::2.kind::mxf4.block_scale.collector::a::lastuse [%r1], %rd1, %rd2, [%r5], %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::1.kind::mxf4.block_scale.collector::a::lastuse [%r1], [%r6], %rd2, [%r5], %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::2.kind::mxf4.block_scale.collector::a::lastuse [%r1], [%r6], %rd2, [%r5], %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: 
tcgen05.mma.sp.cta_group::1.kind::mxf4.block_scale.collector::a::fill [%r1], %rd1, %rd2, [%r5], %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::2.kind::mxf4.block_scale.collector::a::fill [%r1], %rd1, %rd2, [%r5], %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::1.kind::mxf4.block_scale.collector::a::fill [%r1], [%r6], %rd2, [%r5], %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::2.kind::mxf4.block_scale.collector::a::fill [%r1], [%r6], %rd2, [%r5], %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::1.kind::mxf4.block_scale.collector::a::use [%r1], %rd1, %rd2, [%r5], %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::2.kind::mxf4.block_scale.collector::a::use [%r1], %rd1, %rd2, [%r5], %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::1.kind::mxf4.block_scale.collector::a::use [%r1], [%r6], %rd2, [%r5], %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::2.kind::mxf4.block_scale.collector::a::use [%r1], [%r6], %rd2, [%r5], %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: ret; + call void @llvm.nvvm.tcgen05.mma.sp.shared.mxf4.block_scale(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 1, i32 0) + + call void @llvm.nvvm.tcgen05.mma.sp.shared.mxf4.block_scale(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 2, i32 0) + + call void @llvm.nvvm.tcgen05.mma.sp.tensor.mxf4.block_scale(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 1, i32 0) + + call void @llvm.nvvm.tcgen05.mma.sp.tensor.mxf4.block_scale(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 2, i32 0) + + call void @llvm.nvvm.tcgen05.mma.sp.shared.mxf4.block_scale(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 1, i32 1) + + call void @llvm.nvvm.tcgen05.mma.sp.shared.mxf4.block_scale(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 2, i32 1) + + call void @llvm.nvvm.tcgen05.mma.sp.tensor.mxf4.block_scale(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 1, i32 1) + + call void @llvm.nvvm.tcgen05.mma.sp.tensor.mxf4.block_scale(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 2, i32 1) + + call void @llvm.nvvm.tcgen05.mma.sp.shared.mxf4.block_scale(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 1, i32 2) + + call void @llvm.nvvm.tcgen05.mma.sp.shared.mxf4.block_scale(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 2, i32 2) + + call void 
@llvm.nvvm.tcgen05.mma.sp.tensor.mxf4.block_scale(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 1, i32 2)
+
+ call void @llvm.nvvm.tcgen05.mma.sp.tensor.mxf4.block_scale(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 2, i32 2)
+
+ call void @llvm.nvvm.tcgen05.mma.sp.shared.mxf4.block_scale(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 1, i32 3)
+
+ call void @llvm.nvvm.tcgen05.mma.sp.shared.mxf4.block_scale(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 2, i32 3)
+
+ call void @llvm.nvvm.tcgen05.mma.sp.tensor.mxf4.block_scale(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 1, i32 3)
+
+ call void @llvm.nvvm.tcgen05.mma.sp.tensor.mxf4.block_scale(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 2, i32 3)
+
+ ret void
+}
diff --git a/llvm/test/CodeGen/NVPTX/tcgen05-mma-disable-output-lane.ll b/llvm/test/CodeGen/NVPTX/tcgen05-mma-disable-output-lane.ll
new file mode 100644
index 0000000000000..1f7870e21297d
--- /dev/null
+++ b/llvm/test/CodeGen/NVPTX/tcgen05-mma-disable-output-lane.ll
@@ -0,0 +1,681 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc < %s -o - -mcpu=sm_100a -march=nvptx64 -mattr=+ptx86 | FileCheck %s
+; RUN: llc < %s -o - -mcpu=sm_101a -march=nvptx64 -mattr=+ptx86 | FileCheck %s
+; RUN: %if ptxas-12.8 %{ llc < %s -march=nvptx64 -mattr=+ptx86 -mcpu=sm_100a | %ptxas-verify -arch=sm_100a %}
+; RUN: %if ptxas-12.8 %{ llc < %s -march=nvptx64 -mattr=+ptx86 -mcpu=sm_101a | %ptxas-verify -arch=sm_101a %}
+
+define void @tcgen05_mma_fp16_shared_disable_output_lane(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, <4 x i32> %disable_output_lanev4, <8 x i32> %disable_output_lanev8) {
+; CHECK-LABEL: tcgen05_mma_fp16_shared_disable_output_lane(
+; CHECK: {
+; CHECK-NEXT: .reg .pred %p<2>;
+; CHECK-NEXT: .reg .b16 %rs<3>;
+; CHECK-NEXT: .reg .b32 %r<16>;
+; CHECK-NEXT: .reg .b64 %rd<3>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: ld.param.b8 %rs1, [tcgen05_mma_fp16_shared_disable_output_lane_param_5];
+; CHECK-NEXT: and.b16 %rs2, %rs1, 1;
+; CHECK-NEXT: setp.ne.b16 %p1, %rs2, 0;
+; CHECK-NEXT: ld.param.b32 %r1, [tcgen05_mma_fp16_shared_disable_output_lane_param_0];
+; CHECK-NEXT: ld.param.b64 %rd1, [tcgen05_mma_fp16_shared_disable_output_lane_param_2];
+; CHECK-NEXT: ld.param.b64 %rd2, [tcgen05_mma_fp16_shared_disable_output_lane_param_3];
+; CHECK-NEXT: ld.param.b32 %r2, [tcgen05_mma_fp16_shared_disable_output_lane_param_4];
+; CHECK-NEXT: ld.param.v4.b32 {%r3, %r4, %r5, %r6}, [tcgen05_mma_fp16_shared_disable_output_lane_param_6];
+; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::f16.collector::a::discard [%r1], %rd1, %rd2, %r2, {%r3, %r4, %r5, %r6}, %p1;
+; CHECK-NEXT: ld.param.b32 %r7,
[tcgen05_mma_fp16_shared_disable_output_lane_param_1]; +; CHECK-NEXT: ld.param.v4.b32 {%r8, %r9, %r10, %r11}, [tcgen05_mma_fp16_shared_disable_output_lane_param_7+16]; +; CHECK-NEXT: ld.param.v4.b32 {%r12, %r13, %r14, %r15}, [tcgen05_mma_fp16_shared_disable_output_lane_param_7]; +; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::f16.collector::a::discard [%r1], %rd1, %rd2, %r2, {%r12, %r13, %r14, %r15, %r8, %r9, %r10, %r11}, %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::f16.collector::a::discard [%r1], [%r7], %rd2, %r2, {%r3, %r4, %r5, %r6}, %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::f16.collector::a::discard [%r1], [%r7], %rd2, %r2, {%r12, %r13, %r14, %r15, %r8, %r9, %r10, %r11}, %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::f16.ashift.collector::a::discard [%r1], [%r7], %rd2, %r2, {%r3, %r4, %r5, %r6}, %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::f16.ashift.collector::a::discard [%r1], [%r7], %rd2, %r2, {%r12, %r13, %r14, %r15, %r8, %r9, %r10, %r11}, %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::f16.collector::a::lastuse [%r1], %rd1, %rd2, %r2, {%r3, %r4, %r5, %r6}, %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::f16.collector::a::lastuse [%r1], %rd1, %rd2, %r2, {%r12, %r13, %r14, %r15, %r8, %r9, %r10, %r11}, %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::f16.collector::a::lastuse [%r1], [%r7], %rd2, %r2, {%r3, %r4, %r5, %r6}, %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::f16.collector::a::lastuse [%r1], [%r7], %rd2, %r2, {%r12, %r13, %r14, %r15, %r8, %r9, %r10, %r11}, %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::f16.ashift.collector::a::lastuse [%r1], [%r7], %rd2, %r2, {%r3, %r4, %r5, %r6}, %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::f16.ashift.collector::a::lastuse [%r1], [%r7], %rd2, %r2, {%r12, %r13, %r14, %r15, %r8, %r9, %r10, %r11}, %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::f16.collector::a::fill [%r1], %rd1, %rd2, %r2, {%r3, %r4, %r5, %r6}, %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::f16.collector::a::fill [%r1], %rd1, %rd2, %r2, {%r12, %r13, %r14, %r15, %r8, %r9, %r10, %r11}, %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::f16.collector::a::fill [%r1], [%r7], %rd2, %r2, {%r3, %r4, %r5, %r6}, %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::f16.collector::a::fill [%r1], [%r7], %rd2, %r2, {%r12, %r13, %r14, %r15, %r8, %r9, %r10, %r11}, %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::f16.collector::a::use [%r1], %rd1, %rd2, %r2, {%r3, %r4, %r5, %r6}, %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::f16.collector::a::use [%r1], %rd1, %rd2, %r2, {%r12, %r13, %r14, %r15, %r8, %r9, %r10, %r11}, %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::f16.collector::a::use [%r1], [%r7], %rd2, %r2, {%r3, %r4, %r5, %r6}, %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::f16.collector::a::use [%r1], [%r7], %rd2, %r2, {%r12, %r13, %r14, %r15, %r8, %r9, %r10, %r11}, %p1; +; CHECK-NEXT: ret; + call void @llvm.nvvm.tcgen05.mma.shared.disable_output_lane.cg1(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, <4 x i32> %disable_output_lanev4, i32 0, i32 0) + + call void @llvm.nvvm.tcgen05.mma.shared.disable_output_lane.cg2(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, <8 x i32> %disable_output_lanev8, i32 0, i32 0) + + call void @llvm.nvvm.tcgen05.mma.tensor.disable_output_lane.cg1(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, <4 x i32> %disable_output_lanev4, i32 0, i32 0) + + call void 
@llvm.nvvm.tcgen05.mma.tensor.disable_output_lane.cg2(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, <8 x i32> %disable_output_lanev8, i32 0, i32 0) + + call void @llvm.nvvm.tcgen05.mma.tensor.disable_output_lane.cg1.ashift(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, <4 x i32> %disable_output_lanev4, i32 0, i32 0) + + call void @llvm.nvvm.tcgen05.mma.tensor.disable_output_lane.cg2.ashift(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, <8 x i32> %disable_output_lanev8, i32 0, i32 0) + + call void @llvm.nvvm.tcgen05.mma.shared.disable_output_lane.cg1(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, <4 x i32> %disable_output_lanev4, i32 0, i32 1) + + call void @llvm.nvvm.tcgen05.mma.shared.disable_output_lane.cg2(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, <8 x i32> %disable_output_lanev8, i32 0, i32 1) + + call void @llvm.nvvm.tcgen05.mma.tensor.disable_output_lane.cg1(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, <4 x i32> %disable_output_lanev4, i32 0, i32 1) + + call void @llvm.nvvm.tcgen05.mma.tensor.disable_output_lane.cg2(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, <8 x i32> %disable_output_lanev8, i32 0, i32 1) + + call void @llvm.nvvm.tcgen05.mma.tensor.disable_output_lane.cg1.ashift(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, <4 x i32> %disable_output_lanev4, i32 0, i32 1) + + call void @llvm.nvvm.tcgen05.mma.tensor.disable_output_lane.cg2.ashift(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, <8 x i32> %disable_output_lanev8, i32 0, i32 1) + + call void @llvm.nvvm.tcgen05.mma.shared.disable_output_lane.cg1(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, <4 x i32> %disable_output_lanev4, i32 0, i32 2) + + call void @llvm.nvvm.tcgen05.mma.shared.disable_output_lane.cg2(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, <8 x i32> %disable_output_lanev8, i32 0, i32 2) + + call void @llvm.nvvm.tcgen05.mma.tensor.disable_output_lane.cg1(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, <4 x i32> %disable_output_lanev4, i32 0, i32 2) + + call void @llvm.nvvm.tcgen05.mma.tensor.disable_output_lane.cg2(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, <8 x i32> %disable_output_lanev8, i32 0, i32 2) + + call void @llvm.nvvm.tcgen05.mma.shared.disable_output_lane.cg1(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, <4 x i32> %disable_output_lanev4, i32 0, i32 3) + + call void @llvm.nvvm.tcgen05.mma.shared.disable_output_lane.cg2(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, <8 x i32> %disable_output_lanev8, i32 0, i32 3) + + call void @llvm.nvvm.tcgen05.mma.tensor.disable_output_lane.cg1(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, <4 x i32> %disable_output_lanev4, i32 0, i32 3) + + call void @llvm.nvvm.tcgen05.mma.tensor.disable_output_lane.cg2(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, <8 x i32> %disable_output_lanev8, i32 0, i32 3) + + ret void +} + +define void @tcgen05_mma_sp_fp16_shared_disable_output_lane(ptr addrspace(6) %dtmem, ptr 
addrspace(6) %atensor, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, <4 x i32> %disable_output_lanev4, <8 x i32> %disable_output_lanev8, ptr addrspace(6) %spmetadata) { +; CHECK-LABEL: tcgen05_mma_sp_fp16_shared_disable_output_lane( +; CHECK: { +; CHECK-NEXT: .reg .pred %p<2>; +; CHECK-NEXT: .reg .b16 %rs<3>; +; CHECK-NEXT: .reg .b32 %r<17>; +; CHECK-NEXT: .reg .b64 %rd<3>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.b8 %rs1, [tcgen05_mma_sp_fp16_shared_disable_output_lane_param_5]; +; CHECK-NEXT: and.b16 %rs2, %rs1, 1; +; CHECK-NEXT: setp.ne.b16 %p1, %rs2, 0; +; CHECK-NEXT: ld.param.b32 %r1, [tcgen05_mma_sp_fp16_shared_disable_output_lane_param_0]; +; CHECK-NEXT: ld.param.b64 %rd1, [tcgen05_mma_sp_fp16_shared_disable_output_lane_param_2]; +; CHECK-NEXT: ld.param.b64 %rd2, [tcgen05_mma_sp_fp16_shared_disable_output_lane_param_3]; +; CHECK-NEXT: ld.param.b32 %r2, [tcgen05_mma_sp_fp16_shared_disable_output_lane_param_4]; +; CHECK-NEXT: ld.param.v4.b32 {%r3, %r4, %r5, %r6}, [tcgen05_mma_sp_fp16_shared_disable_output_lane_param_6]; +; CHECK-NEXT: ld.param.b32 %r7, [tcgen05_mma_sp_fp16_shared_disable_output_lane_param_8]; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::1.kind::f16.collector::a::discard [%r1], %rd1, %rd2, [%r7], %r2, {%r3, %r4, %r5, %r6}, %p1; +; CHECK-NEXT: ld.param.b32 %r8, [tcgen05_mma_sp_fp16_shared_disable_output_lane_param_1]; +; CHECK-NEXT: ld.param.v4.b32 {%r9, %r10, %r11, %r12}, [tcgen05_mma_sp_fp16_shared_disable_output_lane_param_7+16]; +; CHECK-NEXT: ld.param.v4.b32 {%r13, %r14, %r15, %r16}, [tcgen05_mma_sp_fp16_shared_disable_output_lane_param_7]; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::2.kind::f16.collector::a::discard [%r1], %rd1, %rd2, [%r7], %r2, {%r13, %r14, %r15, %r16, %r9, %r10, %r11, %r12}, %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::1.kind::f16.collector::a::discard [%r1], [%r8], %rd2, [%r7], %r2, {%r3, %r4, %r5, %r6}, %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::2.kind::f16.collector::a::discard [%r1], [%r8], %rd2, [%r7], %r2, {%r13, %r14, %r15, %r16, %r9, %r10, %r11, %r12}, %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::1.kind::f16.ashift.collector::a::discard [%r1], [%r8], %rd2, [%r7], %r2, {%r3, %r4, %r5, %r6}, %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::2.kind::f16.ashift.collector::a::discard [%r1], [%r8], %rd2, [%r7], %r2, {%r13, %r14, %r15, %r16, %r9, %r10, %r11, %r12}, %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::1.kind::f16.collector::a::lastuse [%r1], %rd1, %rd2, [%r7], %r2, {%r3, %r4, %r5, %r6}, %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::2.kind::f16.collector::a::lastuse [%r1], %rd1, %rd2, [%r7], %r2, {%r13, %r14, %r15, %r16, %r9, %r10, %r11, %r12}, %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::1.kind::f16.collector::a::lastuse [%r1], [%r8], %rd2, [%r7], %r2, {%r3, %r4, %r5, %r6}, %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::2.kind::f16.collector::a::lastuse [%r1], [%r8], %rd2, [%r7], %r2, {%r13, %r14, %r15, %r16, %r9, %r10, %r11, %r12}, %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::1.kind::f16.ashift.collector::a::lastuse [%r1], [%r8], %rd2, [%r7], %r2, {%r3, %r4, %r5, %r6}, %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::2.kind::f16.ashift.collector::a::lastuse [%r1], [%r8], %rd2, [%r7], %r2, {%r13, %r14, %r15, %r16, %r9, %r10, %r11, %r12}, %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::1.kind::f16.collector::a::fill [%r1], %rd1, %rd2, [%r7], %r2, {%r3, %r4, %r5, %r6}, %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::2.kind::f16.collector::a::fill [%r1], %rd1, %rd2, [%r7], %r2, {%r13, %r14, %r15, %r16, %r9, %r10, %r11, 
%r12}, %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::1.kind::f16.collector::a::fill [%r1], [%r8], %rd2, [%r7], %r2, {%r3, %r4, %r5, %r6}, %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::2.kind::f16.collector::a::fill [%r1], [%r8], %rd2, [%r7], %r2, {%r13, %r14, %r15, %r16, %r9, %r10, %r11, %r12}, %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::1.kind::f16.collector::a::use [%r1], %rd1, %rd2, [%r7], %r2, {%r3, %r4, %r5, %r6}, %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::2.kind::f16.collector::a::use [%r1], %rd1, %rd2, [%r7], %r2, {%r13, %r14, %r15, %r16, %r9, %r10, %r11, %r12}, %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::1.kind::f16.collector::a::use [%r1], [%r8], %rd2, [%r7], %r2, {%r3, %r4, %r5, %r6}, %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::2.kind::f16.collector::a::use [%r1], [%r8], %rd2, [%r7], %r2, {%r13, %r14, %r15, %r16, %r9, %r10, %r11, %r12}, %p1; +; CHECK-NEXT: ret; + call void @llvm.nvvm.tcgen05.mma.sp.shared.disable_output_lane.cg1(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, <4 x i32> %disable_output_lanev4, i32 0, i32 0) + + call void @llvm.nvvm.tcgen05.mma.sp.shared.disable_output_lane.cg2(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, <8 x i32> %disable_output_lanev8, i32 0, i32 0) + + call void @llvm.nvvm.tcgen05.mma.sp.tensor.disable_output_lane.cg1(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, <4 x i32> %disable_output_lanev4, i32 0, i32 0) + + call void @llvm.nvvm.tcgen05.mma.sp.tensor.disable_output_lane.cg2(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, <8 x i32> %disable_output_lanev8, i32 0, i32 0) + + call void @llvm.nvvm.tcgen05.mma.sp.tensor.disable_output_lane.cg1.ashift(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, <4 x i32> %disable_output_lanev4, i32 0, i32 0) + + call void @llvm.nvvm.tcgen05.mma.sp.tensor.disable_output_lane.cg2.ashift(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, <8 x i32> %disable_output_lanev8, i32 0, i32 0) + + call void @llvm.nvvm.tcgen05.mma.sp.shared.disable_output_lane.cg1(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, <4 x i32> %disable_output_lanev4, i32 0, i32 1) + + call void @llvm.nvvm.tcgen05.mma.sp.shared.disable_output_lane.cg2(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, <8 x i32> %disable_output_lanev8, i32 0, i32 1) + + call void @llvm.nvvm.tcgen05.mma.sp.tensor.disable_output_lane.cg1(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, <4 x i32> %disable_output_lanev4, i32 0, i32 1) + + call void @llvm.nvvm.tcgen05.mma.sp.tensor.disable_output_lane.cg2(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, <8 x i32> %disable_output_lanev8, i32 0, i32 1) + + call void @llvm.nvvm.tcgen05.mma.sp.tensor.disable_output_lane.cg1.ashift(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, <4 x i32> %disable_output_lanev4, i32 0, i32 1) + + call void 
@llvm.nvvm.tcgen05.mma.sp.tensor.disable_output_lane.cg2.ashift(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, <8 x i32> %disable_output_lanev8, i32 0, i32 1) + + call void @llvm.nvvm.tcgen05.mma.sp.shared.disable_output_lane.cg1(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, <4 x i32> %disable_output_lanev4, i32 0, i32 2) + + call void @llvm.nvvm.tcgen05.mma.sp.shared.disable_output_lane.cg2(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, <8 x i32> %disable_output_lanev8, i32 0, i32 2) + + call void @llvm.nvvm.tcgen05.mma.sp.tensor.disable_output_lane.cg1(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, <4 x i32> %disable_output_lanev4, i32 0, i32 2) + + call void @llvm.nvvm.tcgen05.mma.sp.tensor.disable_output_lane.cg2(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, <8 x i32> %disable_output_lanev8, i32 0, i32 2) + + call void @llvm.nvvm.tcgen05.mma.sp.shared.disable_output_lane.cg1(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, <4 x i32> %disable_output_lanev4, i32 0, i32 3) + + call void @llvm.nvvm.tcgen05.mma.sp.shared.disable_output_lane.cg2(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, <8 x i32> %disable_output_lanev8, i32 0, i32 3) + + call void @llvm.nvvm.tcgen05.mma.sp.tensor.disable_output_lane.cg1(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, <4 x i32> %disable_output_lanev4, i32 0, i32 3) + + call void @llvm.nvvm.tcgen05.mma.sp.tensor.disable_output_lane.cg2(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, <8 x i32> %disable_output_lanev8, i32 0, i32 3) + + ret void +} + +define void @tcgen05_mma_tf32_shared_disable_output_lane(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, <4 x i32> %disable_output_lanev4, <8 x i32> %disable_output_lanev8) { +; CHECK-LABEL: tcgen05_mma_tf32_shared_disable_output_lane( +; CHECK: { +; CHECK-NEXT: .reg .pred %p<2>; +; CHECK-NEXT: .reg .b16 %rs<3>; +; CHECK-NEXT: .reg .b32 %r<16>; +; CHECK-NEXT: .reg .b64 %rd<3>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.b8 %rs1, [tcgen05_mma_tf32_shared_disable_output_lane_param_5]; +; CHECK-NEXT: and.b16 %rs2, %rs1, 1; +; CHECK-NEXT: setp.ne.b16 %p1, %rs2, 0; +; CHECK-NEXT: ld.param.b32 %r1, [tcgen05_mma_tf32_shared_disable_output_lane_param_0]; +; CHECK-NEXT: ld.param.b64 %rd1, [tcgen05_mma_tf32_shared_disable_output_lane_param_2]; +; CHECK-NEXT: ld.param.b64 %rd2, [tcgen05_mma_tf32_shared_disable_output_lane_param_3]; +; CHECK-NEXT: ld.param.b32 %r2, [tcgen05_mma_tf32_shared_disable_output_lane_param_4]; +; CHECK-NEXT: ld.param.v4.b32 {%r3, %r4, %r5, %r6}, [tcgen05_mma_tf32_shared_disable_output_lane_param_6]; +; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::tf32.collector::a::discard [%r1], %rd1, %rd2, %r2, {%r3, %r4, %r5, %r6}, %p1; +; CHECK-NEXT: ld.param.b32 %r7, [tcgen05_mma_tf32_shared_disable_output_lane_param_1]; +; CHECK-NEXT: ld.param.v4.b32 {%r8, %r9, %r10, %r11}, [tcgen05_mma_tf32_shared_disable_output_lane_param_7+16]; +; 
CHECK-NEXT: ld.param.v4.b32 {%r12, %r13, %r14, %r15}, [tcgen05_mma_tf32_shared_disable_output_lane_param_7]; +; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::tf32.collector::a::discard [%r1], %rd1, %rd2, %r2, {%r12, %r13, %r14, %r15, %r8, %r9, %r10, %r11}, %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::tf32.collector::a::discard [%r1], [%r7], %rd2, %r2, {%r3, %r4, %r5, %r6}, %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::tf32.collector::a::discard [%r1], [%r7], %rd2, %r2, {%r12, %r13, %r14, %r15, %r8, %r9, %r10, %r11}, %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::tf32.ashift.collector::a::discard [%r1], [%r7], %rd2, %r2, {%r3, %r4, %r5, %r6}, %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::tf32.ashift.collector::a::discard [%r1], [%r7], %rd2, %r2, {%r12, %r13, %r14, %r15, %r8, %r9, %r10, %r11}, %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::tf32.collector::a::lastuse [%r1], %rd1, %rd2, %r2, {%r3, %r4, %r5, %r6}, %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::tf32.collector::a::lastuse [%r1], %rd1, %rd2, %r2, {%r12, %r13, %r14, %r15, %r8, %r9, %r10, %r11}, %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::tf32.collector::a::lastuse [%r1], [%r7], %rd2, %r2, {%r3, %r4, %r5, %r6}, %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::tf32.collector::a::lastuse [%r1], [%r7], %rd2, %r2, {%r12, %r13, %r14, %r15, %r8, %r9, %r10, %r11}, %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::tf32.ashift.collector::a::lastuse [%r1], [%r7], %rd2, %r2, {%r3, %r4, %r5, %r6}, %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::tf32.ashift.collector::a::lastuse [%r1], [%r7], %rd2, %r2, {%r12, %r13, %r14, %r15, %r8, %r9, %r10, %r11}, %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::tf32.collector::a::fill [%r1], %rd1, %rd2, %r2, {%r3, %r4, %r5, %r6}, %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::tf32.collector::a::fill [%r1], %rd1, %rd2, %r2, {%r12, %r13, %r14, %r15, %r8, %r9, %r10, %r11}, %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::tf32.collector::a::fill [%r1], [%r7], %rd2, %r2, {%r3, %r4, %r5, %r6}, %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::tf32.collector::a::fill [%r1], [%r7], %rd2, %r2, {%r12, %r13, %r14, %r15, %r8, %r9, %r10, %r11}, %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::tf32.collector::a::use [%r1], %rd1, %rd2, %r2, {%r3, %r4, %r5, %r6}, %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::tf32.collector::a::use [%r1], %rd1, %rd2, %r2, {%r12, %r13, %r14, %r15, %r8, %r9, %r10, %r11}, %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::tf32.collector::a::use [%r1], [%r7], %rd2, %r2, {%r3, %r4, %r5, %r6}, %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::tf32.collector::a::use [%r1], [%r7], %rd2, %r2, {%r12, %r13, %r14, %r15, %r8, %r9, %r10, %r11}, %p1; +; CHECK-NEXT: ret; + call void @llvm.nvvm.tcgen05.mma.shared.disable_output_lane.cg1(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, <4 x i32> %disable_output_lanev4, i32 1, i32 0) + + call void @llvm.nvvm.tcgen05.mma.shared.disable_output_lane.cg2(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, <8 x i32> %disable_output_lanev8, i32 1, i32 0) + + call void @llvm.nvvm.tcgen05.mma.tensor.disable_output_lane.cg1(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, <4 x i32> %disable_output_lanev4, i32 1, i32 0) + + call void @llvm.nvvm.tcgen05.mma.tensor.disable_output_lane.cg2(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, <8 x i32> %disable_output_lanev8, i32 1, i32 0) + 
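+  ; As exercised above, the two trailing i32 immediates appear to select the
+  ; mma kind (0 = f16, 1 = tf32, 2 = f8f6f4, 3 = i8) and the collector usage
+  ; (0 = discard, 1 = lastuse, 2 = fill, 3 = use), respectively; both must be
+  ; compile-time constants.
+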
+ call void @llvm.nvvm.tcgen05.mma.tensor.disable_output_lane.cg1.ashift(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, <4 x i32> %disable_output_lanev4, i32 1, i32 0) + + call void @llvm.nvvm.tcgen05.mma.tensor.disable_output_lane.cg2.ashift(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, <8 x i32> %disable_output_lanev8, i32 1, i32 0) + + call void @llvm.nvvm.tcgen05.mma.shared.disable_output_lane.cg1(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, <4 x i32> %disable_output_lanev4, i32 1, i32 1) + + call void @llvm.nvvm.tcgen05.mma.shared.disable_output_lane.cg2(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, <8 x i32> %disable_output_lanev8, i32 1, i32 1) + + call void @llvm.nvvm.tcgen05.mma.tensor.disable_output_lane.cg1(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, <4 x i32> %disable_output_lanev4, i32 1, i32 1) + + call void @llvm.nvvm.tcgen05.mma.tensor.disable_output_lane.cg2(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, <8 x i32> %disable_output_lanev8, i32 1, i32 1) + + call void @llvm.nvvm.tcgen05.mma.tensor.disable_output_lane.cg1.ashift(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, <4 x i32> %disable_output_lanev4, i32 1, i32 1) + + call void @llvm.nvvm.tcgen05.mma.tensor.disable_output_lane.cg2.ashift(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, <8 x i32> %disable_output_lanev8, i32 1, i32 1) + + call void @llvm.nvvm.tcgen05.mma.shared.disable_output_lane.cg1(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, <4 x i32> %disable_output_lanev4, i32 1, i32 2) + + call void @llvm.nvvm.tcgen05.mma.shared.disable_output_lane.cg2(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, <8 x i32> %disable_output_lanev8, i32 1, i32 2) + + call void @llvm.nvvm.tcgen05.mma.tensor.disable_output_lane.cg1(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, <4 x i32> %disable_output_lanev4, i32 1, i32 2) + + call void @llvm.nvvm.tcgen05.mma.tensor.disable_output_lane.cg2(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, <8 x i32> %disable_output_lanev8, i32 1, i32 2) + + call void @llvm.nvvm.tcgen05.mma.shared.disable_output_lane.cg1(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, <4 x i32> %disable_output_lanev4, i32 1, i32 3) + + call void @llvm.nvvm.tcgen05.mma.shared.disable_output_lane.cg2(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, <8 x i32> %disable_output_lanev8, i32 1, i32 3) + + call void @llvm.nvvm.tcgen05.mma.tensor.disable_output_lane.cg1(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, <4 x i32> %disable_output_lanev4, i32 1, i32 3) + + call void @llvm.nvvm.tcgen05.mma.tensor.disable_output_lane.cg2(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, <8 x i32> %disable_output_lanev8, i32 1, i32 3) + + ret void +} + +define void @tcgen05_mma_sp_tf32_shared_disable_output_lane(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, <4 x i32> %disable_output_lanev4, <8 x i32> %disable_output_lanev8, ptr addrspace(6) %spmetadata) { +; CHECK-LABEL: 
tcgen05_mma_sp_tf32_shared_disable_output_lane( +; CHECK: { +; CHECK-NEXT: .reg .pred %p<2>; +; CHECK-NEXT: .reg .b16 %rs<3>; +; CHECK-NEXT: .reg .b32 %r<17>; +; CHECK-NEXT: .reg .b64 %rd<3>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.b8 %rs1, [tcgen05_mma_sp_tf32_shared_disable_output_lane_param_5]; +; CHECK-NEXT: and.b16 %rs2, %rs1, 1; +; CHECK-NEXT: setp.ne.b16 %p1, %rs2, 0; +; CHECK-NEXT: ld.param.b32 %r1, [tcgen05_mma_sp_tf32_shared_disable_output_lane_param_0]; +; CHECK-NEXT: ld.param.b64 %rd1, [tcgen05_mma_sp_tf32_shared_disable_output_lane_param_2]; +; CHECK-NEXT: ld.param.b64 %rd2, [tcgen05_mma_sp_tf32_shared_disable_output_lane_param_3]; +; CHECK-NEXT: ld.param.b32 %r2, [tcgen05_mma_sp_tf32_shared_disable_output_lane_param_4]; +; CHECK-NEXT: ld.param.v4.b32 {%r3, %r4, %r5, %r6}, [tcgen05_mma_sp_tf32_shared_disable_output_lane_param_6]; +; CHECK-NEXT: ld.param.b32 %r7, [tcgen05_mma_sp_tf32_shared_disable_output_lane_param_8]; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::1.kind::tf32.collector::a::discard [%r1], %rd1, %rd2, [%r7], %r2, {%r3, %r4, %r5, %r6}, %p1; +; CHECK-NEXT: ld.param.b32 %r8, [tcgen05_mma_sp_tf32_shared_disable_output_lane_param_1]; +; CHECK-NEXT: ld.param.v4.b32 {%r9, %r10, %r11, %r12}, [tcgen05_mma_sp_tf32_shared_disable_output_lane_param_7+16]; +; CHECK-NEXT: ld.param.v4.b32 {%r13, %r14, %r15, %r16}, [tcgen05_mma_sp_tf32_shared_disable_output_lane_param_7]; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::2.kind::tf32.collector::a::discard [%r1], %rd1, %rd2, [%r7], %r2, {%r13, %r14, %r15, %r16, %r9, %r10, %r11, %r12}, %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::1.kind::tf32.collector::a::discard [%r1], [%r8], %rd2, [%r7], %r2, {%r3, %r4, %r5, %r6}, %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::2.kind::tf32.collector::a::discard [%r1], [%r8], %rd2, [%r7], %r2, {%r13, %r14, %r15, %r16, %r9, %r10, %r11, %r12}, %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::1.kind::tf32.ashift.collector::a::discard [%r1], [%r8], %rd2, [%r7], %r2, {%r3, %r4, %r5, %r6}, %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::2.kind::tf32.ashift.collector::a::discard [%r1], [%r8], %rd2, [%r7], %r2, {%r13, %r14, %r15, %r16, %r9, %r10, %r11, %r12}, %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::1.kind::tf32.collector::a::lastuse [%r1], %rd1, %rd2, [%r7], %r2, {%r3, %r4, %r5, %r6}, %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::2.kind::tf32.collector::a::lastuse [%r1], %rd1, %rd2, [%r7], %r2, {%r13, %r14, %r15, %r16, %r9, %r10, %r11, %r12}, %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::1.kind::tf32.collector::a::lastuse [%r1], [%r8], %rd2, [%r7], %r2, {%r3, %r4, %r5, %r6}, %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::2.kind::tf32.collector::a::lastuse [%r1], [%r8], %rd2, [%r7], %r2, {%r13, %r14, %r15, %r16, %r9, %r10, %r11, %r12}, %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::1.kind::tf32.ashift.collector::a::lastuse [%r1], [%r8], %rd2, [%r7], %r2, {%r3, %r4, %r5, %r6}, %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::2.kind::tf32.ashift.collector::a::lastuse [%r1], [%r8], %rd2, [%r7], %r2, {%r13, %r14, %r15, %r16, %r9, %r10, %r11, %r12}, %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::1.kind::tf32.collector::a::fill [%r1], %rd1, %rd2, [%r7], %r2, {%r3, %r4, %r5, %r6}, %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::2.kind::tf32.collector::a::fill [%r1], %rd1, %rd2, [%r7], %r2, {%r13, %r14, %r15, %r16, %r9, %r10, %r11, %r12}, %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::1.kind::tf32.collector::a::fill [%r1], [%r8], %rd2, [%r7], %r2, {%r3, %r4, %r5, %r6}, %p1; +; CHECK-NEXT: 
tcgen05.mma.sp.cta_group::2.kind::tf32.collector::a::fill [%r1], [%r8], %rd2, [%r7], %r2, {%r13, %r14, %r15, %r16, %r9, %r10, %r11, %r12}, %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::1.kind::tf32.collector::a::use [%r1], %rd1, %rd2, [%r7], %r2, {%r3, %r4, %r5, %r6}, %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::2.kind::tf32.collector::a::use [%r1], %rd1, %rd2, [%r7], %r2, {%r13, %r14, %r15, %r16, %r9, %r10, %r11, %r12}, %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::1.kind::tf32.collector::a::use [%r1], [%r8], %rd2, [%r7], %r2, {%r3, %r4, %r5, %r6}, %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::2.kind::tf32.collector::a::use [%r1], [%r8], %rd2, [%r7], %r2, {%r13, %r14, %r15, %r16, %r9, %r10, %r11, %r12}, %p1; +; CHECK-NEXT: ret; + call void @llvm.nvvm.tcgen05.mma.sp.shared.disable_output_lane.cg1(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, <4 x i32> %disable_output_lanev4, i32 1, i32 0) + + call void @llvm.nvvm.tcgen05.mma.sp.shared.disable_output_lane.cg2(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, <8 x i32> %disable_output_lanev8, i32 1, i32 0) + + call void @llvm.nvvm.tcgen05.mma.sp.tensor.disable_output_lane.cg1(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, <4 x i32> %disable_output_lanev4, i32 1, i32 0) + + call void @llvm.nvvm.tcgen05.mma.sp.tensor.disable_output_lane.cg2(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, <8 x i32> %disable_output_lanev8, i32 1, i32 0) + + call void @llvm.nvvm.tcgen05.mma.sp.tensor.disable_output_lane.cg1.ashift(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, <4 x i32> %disable_output_lanev4, i32 1, i32 0) + + call void @llvm.nvvm.tcgen05.mma.sp.tensor.disable_output_lane.cg2.ashift(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, <8 x i32> %disable_output_lanev8, i32 1, i32 0) + + call void @llvm.nvvm.tcgen05.mma.sp.shared.disable_output_lane.cg1(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, <4 x i32> %disable_output_lanev4, i32 1, i32 1) + + call void @llvm.nvvm.tcgen05.mma.sp.shared.disable_output_lane.cg2(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, <8 x i32> %disable_output_lanev8, i32 1, i32 1) + + call void @llvm.nvvm.tcgen05.mma.sp.tensor.disable_output_lane.cg1(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, <4 x i32> %disable_output_lanev4, i32 1, i32 1) + + call void @llvm.nvvm.tcgen05.mma.sp.tensor.disable_output_lane.cg2(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, <8 x i32> %disable_output_lanev8, i32 1, i32 1) + + call void @llvm.nvvm.tcgen05.mma.sp.tensor.disable_output_lane.cg1.ashift(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, <4 x i32> %disable_output_lanev4, i32 1, i32 1) + + call void @llvm.nvvm.tcgen05.mma.sp.tensor.disable_output_lane.cg2.ashift(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, <8 x i32> 
%disable_output_lanev8, i32 1, i32 1) + + call void @llvm.nvvm.tcgen05.mma.sp.shared.disable_output_lane.cg1(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, <4 x i32> %disable_output_lanev4, i32 1, i32 2) + + call void @llvm.nvvm.tcgen05.mma.sp.shared.disable_output_lane.cg2(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, <8 x i32> %disable_output_lanev8, i32 1, i32 2) + + call void @llvm.nvvm.tcgen05.mma.sp.tensor.disable_output_lane.cg1(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, <4 x i32> %disable_output_lanev4, i32 1, i32 2) + + call void @llvm.nvvm.tcgen05.mma.sp.tensor.disable_output_lane.cg2(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, <8 x i32> %disable_output_lanev8, i32 1, i32 2) + + call void @llvm.nvvm.tcgen05.mma.sp.shared.disable_output_lane.cg1(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, <4 x i32> %disable_output_lanev4, i32 1, i32 3) + + call void @llvm.nvvm.tcgen05.mma.sp.shared.disable_output_lane.cg2(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, <8 x i32> %disable_output_lanev8, i32 1, i32 3) + + call void @llvm.nvvm.tcgen05.mma.sp.tensor.disable_output_lane.cg1(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, <4 x i32> %disable_output_lanev4, i32 1, i32 3) + + call void @llvm.nvvm.tcgen05.mma.sp.tensor.disable_output_lane.cg2(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, <8 x i32> %disable_output_lanev8, i32 1, i32 3) + + ret void +} + +define void @tcgen05_mma_f8f6f4_shared_disable_output_lane(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, <4 x i32> %disable_output_lanev4, <8 x i32> %disable_output_lanev8) { +; CHECK-LABEL: tcgen05_mma_f8f6f4_shared_disable_output_lane( +; CHECK: { +; CHECK-NEXT: .reg .pred %p<2>; +; CHECK-NEXT: .reg .b16 %rs<3>; +; CHECK-NEXT: .reg .b32 %r<16>; +; CHECK-NEXT: .reg .b64 %rd<3>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.b8 %rs1, [tcgen05_mma_f8f6f4_shared_disable_output_lane_param_5]; +; CHECK-NEXT: and.b16 %rs2, %rs1, 1; +; CHECK-NEXT: setp.ne.b16 %p1, %rs2, 0; +; CHECK-NEXT: ld.param.b32 %r1, [tcgen05_mma_f8f6f4_shared_disable_output_lane_param_0]; +; CHECK-NEXT: ld.param.b64 %rd1, [tcgen05_mma_f8f6f4_shared_disable_output_lane_param_2]; +; CHECK-NEXT: ld.param.b64 %rd2, [tcgen05_mma_f8f6f4_shared_disable_output_lane_param_3]; +; CHECK-NEXT: ld.param.b32 %r2, [tcgen05_mma_f8f6f4_shared_disable_output_lane_param_4]; +; CHECK-NEXT: ld.param.v4.b32 {%r3, %r4, %r5, %r6}, [tcgen05_mma_f8f6f4_shared_disable_output_lane_param_6]; +; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::f8f6f4.collector::a::discard [%r1], %rd1, %rd2, %r2, {%r3, %r4, %r5, %r6}, %p1; +; CHECK-NEXT: ld.param.b32 %r7, [tcgen05_mma_f8f6f4_shared_disable_output_lane_param_1]; +; CHECK-NEXT: ld.param.v4.b32 {%r8, %r9, %r10, %r11}, [tcgen05_mma_f8f6f4_shared_disable_output_lane_param_7+16]; +; CHECK-NEXT: ld.param.v4.b32 {%r12, %r13, %r14, %r15}, [tcgen05_mma_f8f6f4_shared_disable_output_lane_param_7]; +; CHECK-NEXT: 
tcgen05.mma.cta_group::2.kind::f8f6f4.collector::a::discard [%r1], %rd1, %rd2, %r2, {%r12, %r13, %r14, %r15, %r8, %r9, %r10, %r11}, %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::f8f6f4.collector::a::discard [%r1], [%r7], %rd2, %r2, {%r3, %r4, %r5, %r6}, %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::f8f6f4.collector::a::discard [%r1], [%r7], %rd2, %r2, {%r12, %r13, %r14, %r15, %r8, %r9, %r10, %r11}, %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::f8f6f4.ashift.collector::a::discard [%r1], [%r7], %rd2, %r2, {%r3, %r4, %r5, %r6}, %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::f8f6f4.ashift.collector::a::discard [%r1], [%r7], %rd2, %r2, {%r12, %r13, %r14, %r15, %r8, %r9, %r10, %r11}, %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::f8f6f4.collector::a::lastuse [%r1], %rd1, %rd2, %r2, {%r3, %r4, %r5, %r6}, %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::f8f6f4.collector::a::lastuse [%r1], %rd1, %rd2, %r2, {%r12, %r13, %r14, %r15, %r8, %r9, %r10, %r11}, %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::f8f6f4.collector::a::lastuse [%r1], [%r7], %rd2, %r2, {%r3, %r4, %r5, %r6}, %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::f8f6f4.collector::a::lastuse [%r1], [%r7], %rd2, %r2, {%r12, %r13, %r14, %r15, %r8, %r9, %r10, %r11}, %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::f8f6f4.ashift.collector::a::lastuse [%r1], [%r7], %rd2, %r2, {%r3, %r4, %r5, %r6}, %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::f8f6f4.ashift.collector::a::lastuse [%r1], [%r7], %rd2, %r2, {%r12, %r13, %r14, %r15, %r8, %r9, %r10, %r11}, %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::f8f6f4.collector::a::fill [%r1], %rd1, %rd2, %r2, {%r3, %r4, %r5, %r6}, %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::f8f6f4.collector::a::fill [%r1], %rd1, %rd2, %r2, {%r12, %r13, %r14, %r15, %r8, %r9, %r10, %r11}, %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::f8f6f4.collector::a::fill [%r1], [%r7], %rd2, %r2, {%r3, %r4, %r5, %r6}, %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::f8f6f4.collector::a::fill [%r1], [%r7], %rd2, %r2, {%r12, %r13, %r14, %r15, %r8, %r9, %r10, %r11}, %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::f8f6f4.collector::a::use [%r1], %rd1, %rd2, %r2, {%r3, %r4, %r5, %r6}, %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::f8f6f4.collector::a::use [%r1], %rd1, %rd2, %r2, {%r12, %r13, %r14, %r15, %r8, %r9, %r10, %r11}, %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::f8f6f4.collector::a::use [%r1], [%r7], %rd2, %r2, {%r3, %r4, %r5, %r6}, %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::f8f6f4.collector::a::use [%r1], [%r7], %rd2, %r2, {%r12, %r13, %r14, %r15, %r8, %r9, %r10, %r11}, %p1; +; CHECK-NEXT: ret; + call void @llvm.nvvm.tcgen05.mma.shared.disable_output_lane.cg1(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, <4 x i32> %disable_output_lanev4, i32 2, i32 0) + + call void @llvm.nvvm.tcgen05.mma.shared.disable_output_lane.cg2(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, <8 x i32> %disable_output_lanev8, i32 2, i32 0) + + call void @llvm.nvvm.tcgen05.mma.tensor.disable_output_lane.cg1(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, <4 x i32> %disable_output_lanev4, i32 2, i32 0) + + call void @llvm.nvvm.tcgen05.mma.tensor.disable_output_lane.cg2(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, <8 x i32> %disable_output_lanev8, i32 2, i32 0) + + call void @llvm.nvvm.tcgen05.mma.tensor.disable_output_lane.cg1.ashift(ptr 
addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, <4 x i32> %disable_output_lanev4, i32 2, i32 0) + + call void @llvm.nvvm.tcgen05.mma.tensor.disable_output_lane.cg2.ashift(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, <8 x i32> %disable_output_lanev8, i32 2, i32 0) + + call void @llvm.nvvm.tcgen05.mma.shared.disable_output_lane.cg1(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, <4 x i32> %disable_output_lanev4, i32 2, i32 1) + + call void @llvm.nvvm.tcgen05.mma.shared.disable_output_lane.cg2(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, <8 x i32> %disable_output_lanev8, i32 2, i32 1) + + call void @llvm.nvvm.tcgen05.mma.tensor.disable_output_lane.cg1(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, <4 x i32> %disable_output_lanev4, i32 2, i32 1) + + call void @llvm.nvvm.tcgen05.mma.tensor.disable_output_lane.cg2(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, <8 x i32> %disable_output_lanev8, i32 2, i32 1) + + call void @llvm.nvvm.tcgen05.mma.tensor.disable_output_lane.cg1.ashift(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, <4 x i32> %disable_output_lanev4, i32 2, i32 1) + + call void @llvm.nvvm.tcgen05.mma.tensor.disable_output_lane.cg2.ashift(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, <8 x i32> %disable_output_lanev8, i32 2, i32 1) + + call void @llvm.nvvm.tcgen05.mma.shared.disable_output_lane.cg1(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, <4 x i32> %disable_output_lanev4, i32 2, i32 2) + + call void @llvm.nvvm.tcgen05.mma.shared.disable_output_lane.cg2(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, <8 x i32> %disable_output_lanev8, i32 2, i32 2) + + call void @llvm.nvvm.tcgen05.mma.tensor.disable_output_lane.cg1(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, <4 x i32> %disable_output_lanev4, i32 2, i32 2) + + call void @llvm.nvvm.tcgen05.mma.tensor.disable_output_lane.cg2(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, <8 x i32> %disable_output_lanev8, i32 2, i32 2) + + call void @llvm.nvvm.tcgen05.mma.shared.disable_output_lane.cg1(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, <4 x i32> %disable_output_lanev4, i32 2, i32 3) + + call void @llvm.nvvm.tcgen05.mma.shared.disable_output_lane.cg2(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, <8 x i32> %disable_output_lanev8, i32 2, i32 3) + + call void @llvm.nvvm.tcgen05.mma.tensor.disable_output_lane.cg1(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, <4 x i32> %disable_output_lanev4, i32 2, i32 3) + + call void @llvm.nvvm.tcgen05.mma.tensor.disable_output_lane.cg2(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, <8 x i32> %disable_output_lanev8, i32 2, i32 3) + + ret void +} + +define void @tcgen05_mma_sp_f8f6f4_shared_disable_output_lane(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, <4 x i32> %disable_output_lanev4, <8 x i32> %disable_output_lanev8, ptr addrspace(6) %spmetadata) { +; CHECK-LABEL: tcgen05_mma_sp_f8f6f4_shared_disable_output_lane( +; CHECK: { +; CHECK-NEXT: .reg 
.pred %p<2>; +; CHECK-NEXT: .reg .b16 %rs<3>; +; CHECK-NEXT: .reg .b32 %r<17>; +; CHECK-NEXT: .reg .b64 %rd<3>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.b8 %rs1, [tcgen05_mma_sp_f8f6f4_shared_disable_output_lane_param_5]; +; CHECK-NEXT: and.b16 %rs2, %rs1, 1; +; CHECK-NEXT: setp.ne.b16 %p1, %rs2, 0; +; CHECK-NEXT: ld.param.b32 %r1, [tcgen05_mma_sp_f8f6f4_shared_disable_output_lane_param_0]; +; CHECK-NEXT: ld.param.b64 %rd1, [tcgen05_mma_sp_f8f6f4_shared_disable_output_lane_param_2]; +; CHECK-NEXT: ld.param.b64 %rd2, [tcgen05_mma_sp_f8f6f4_shared_disable_output_lane_param_3]; +; CHECK-NEXT: ld.param.b32 %r2, [tcgen05_mma_sp_f8f6f4_shared_disable_output_lane_param_4]; +; CHECK-NEXT: ld.param.v4.b32 {%r3, %r4, %r5, %r6}, [tcgen05_mma_sp_f8f6f4_shared_disable_output_lane_param_6]; +; CHECK-NEXT: ld.param.b32 %r7, [tcgen05_mma_sp_f8f6f4_shared_disable_output_lane_param_8]; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::1.kind::f8f6f4.collector::a::discard [%r1], %rd1, %rd2, [%r7], %r2, {%r3, %r4, %r5, %r6}, %p1; +; CHECK-NEXT: ld.param.b32 %r8, [tcgen05_mma_sp_f8f6f4_shared_disable_output_lane_param_1]; +; CHECK-NEXT: ld.param.v4.b32 {%r9, %r10, %r11, %r12}, [tcgen05_mma_sp_f8f6f4_shared_disable_output_lane_param_7+16]; +; CHECK-NEXT: ld.param.v4.b32 {%r13, %r14, %r15, %r16}, [tcgen05_mma_sp_f8f6f4_shared_disable_output_lane_param_7]; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::2.kind::f8f6f4.collector::a::discard [%r1], %rd1, %rd2, [%r7], %r2, {%r13, %r14, %r15, %r16, %r9, %r10, %r11, %r12}, %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::1.kind::f8f6f4.collector::a::discard [%r1], [%r8], %rd2, [%r7], %r2, {%r3, %r4, %r5, %r6}, %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::2.kind::f8f6f4.collector::a::discard [%r1], [%r8], %rd2, [%r7], %r2, {%r13, %r14, %r15, %r16, %r9, %r10, %r11, %r12}, %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::1.kind::f8f6f4.ashift.collector::a::discard [%r1], [%r8], %rd2, [%r7], %r2, {%r3, %r4, %r5, %r6}, %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::2.kind::f8f6f4.ashift.collector::a::discard [%r1], [%r8], %rd2, [%r7], %r2, {%r13, %r14, %r15, %r16, %r9, %r10, %r11, %r12}, %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::1.kind::f8f6f4.collector::a::lastuse [%r1], %rd1, %rd2, [%r7], %r2, {%r3, %r4, %r5, %r6}, %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::2.kind::f8f6f4.collector::a::lastuse [%r1], %rd1, %rd2, [%r7], %r2, {%r13, %r14, %r15, %r16, %r9, %r10, %r11, %r12}, %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::1.kind::f8f6f4.collector::a::lastuse [%r1], [%r8], %rd2, [%r7], %r2, {%r3, %r4, %r5, %r6}, %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::2.kind::f8f6f4.collector::a::lastuse [%r1], [%r8], %rd2, [%r7], %r2, {%r13, %r14, %r15, %r16, %r9, %r10, %r11, %r12}, %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::1.kind::f8f6f4.ashift.collector::a::lastuse [%r1], [%r8], %rd2, [%r7], %r2, {%r3, %r4, %r5, %r6}, %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::2.kind::f8f6f4.ashift.collector::a::lastuse [%r1], [%r8], %rd2, [%r7], %r2, {%r13, %r14, %r15, %r16, %r9, %r10, %r11, %r12}, %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::1.kind::f8f6f4.collector::a::fill [%r1], %rd1, %rd2, [%r7], %r2, {%r3, %r4, %r5, %r6}, %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::2.kind::f8f6f4.collector::a::fill [%r1], %rd1, %rd2, [%r7], %r2, {%r13, %r14, %r15, %r16, %r9, %r10, %r11, %r12}, %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::1.kind::f8f6f4.collector::a::fill [%r1], [%r8], %rd2, [%r7], %r2, {%r3, %r4, %r5, %r6}, %p1; +; CHECK-NEXT: 
tcgen05.mma.sp.cta_group::2.kind::f8f6f4.collector::a::fill [%r1], [%r8], %rd2, [%r7], %r2, {%r13, %r14, %r15, %r16, %r9, %r10, %r11, %r12}, %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::1.kind::f8f6f4.collector::a::use [%r1], %rd1, %rd2, [%r7], %r2, {%r3, %r4, %r5, %r6}, %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::2.kind::f8f6f4.collector::a::use [%r1], %rd1, %rd2, [%r7], %r2, {%r13, %r14, %r15, %r16, %r9, %r10, %r11, %r12}, %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::1.kind::f8f6f4.collector::a::use [%r1], [%r8], %rd2, [%r7], %r2, {%r3, %r4, %r5, %r6}, %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::2.kind::f8f6f4.collector::a::use [%r1], [%r8], %rd2, [%r7], %r2, {%r13, %r14, %r15, %r16, %r9, %r10, %r11, %r12}, %p1; +; CHECK-NEXT: ret; + call void @llvm.nvvm.tcgen05.mma.sp.shared.disable_output_lane.cg1(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, <4 x i32> %disable_output_lanev4, i32 2, i32 0) + + call void @llvm.nvvm.tcgen05.mma.sp.shared.disable_output_lane.cg2(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, <8 x i32> %disable_output_lanev8, i32 2, i32 0) + + call void @llvm.nvvm.tcgen05.mma.sp.tensor.disable_output_lane.cg1(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, <4 x i32> %disable_output_lanev4, i32 2, i32 0) + + call void @llvm.nvvm.tcgen05.mma.sp.tensor.disable_output_lane.cg2(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, <8 x i32> %disable_output_lanev8, i32 2, i32 0) + + call void @llvm.nvvm.tcgen05.mma.sp.tensor.disable_output_lane.cg1.ashift(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, <4 x i32> %disable_output_lanev4, i32 2, i32 0) + + call void @llvm.nvvm.tcgen05.mma.sp.tensor.disable_output_lane.cg2.ashift(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, <8 x i32> %disable_output_lanev8, i32 2, i32 0) + + call void @llvm.nvvm.tcgen05.mma.sp.shared.disable_output_lane.cg1(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, <4 x i32> %disable_output_lanev4, i32 2, i32 1) + + call void @llvm.nvvm.tcgen05.mma.sp.shared.disable_output_lane.cg2(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, <8 x i32> %disable_output_lanev8, i32 2, i32 1) + + call void @llvm.nvvm.tcgen05.mma.sp.tensor.disable_output_lane.cg1(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, <4 x i32> %disable_output_lanev4, i32 2, i32 1) + + call void @llvm.nvvm.tcgen05.mma.sp.tensor.disable_output_lane.cg2(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, <8 x i32> %disable_output_lanev8, i32 2, i32 1) + + call void @llvm.nvvm.tcgen05.mma.sp.tensor.disable_output_lane.cg1.ashift(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, <4 x i32> %disable_output_lanev4, i32 2, i32 1) + + call void @llvm.nvvm.tcgen05.mma.sp.tensor.disable_output_lane.cg2.ashift(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, <8 x i32> 
%disable_output_lanev8, i32 2, i32 1) + + call void @llvm.nvvm.tcgen05.mma.sp.shared.disable_output_lane.cg1(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, <4 x i32> %disable_output_lanev4, i32 2, i32 2) + + call void @llvm.nvvm.tcgen05.mma.sp.shared.disable_output_lane.cg2(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, <8 x i32> %disable_output_lanev8, i32 2, i32 2) + + call void @llvm.nvvm.tcgen05.mma.sp.tensor.disable_output_lane.cg1(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, <4 x i32> %disable_output_lanev4, i32 2, i32 2) + + call void @llvm.nvvm.tcgen05.mma.sp.tensor.disable_output_lane.cg2(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, <8 x i32> %disable_output_lanev8, i32 2, i32 2) + + call void @llvm.nvvm.tcgen05.mma.sp.shared.disable_output_lane.cg1(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, <4 x i32> %disable_output_lanev4, i32 2, i32 3) + + call void @llvm.nvvm.tcgen05.mma.sp.shared.disable_output_lane.cg2(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, <8 x i32> %disable_output_lanev8, i32 2, i32 3) + + call void @llvm.nvvm.tcgen05.mma.sp.tensor.disable_output_lane.cg1(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, <4 x i32> %disable_output_lanev4, i32 2, i32 3) + + call void @llvm.nvvm.tcgen05.mma.sp.tensor.disable_output_lane.cg2(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, <8 x i32> %disable_output_lanev8, i32 2, i32 3) + + ret void +} + +define void @tcgen05_mma_i8_shared_disable_output_lane(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, <4 x i32> %disable_output_lanev4, <8 x i32> %disable_output_lanev8) { +; CHECK-LABEL: tcgen05_mma_i8_shared_disable_output_lane( +; CHECK: { +; CHECK-NEXT: .reg .pred %p<2>; +; CHECK-NEXT: .reg .b16 %rs<3>; +; CHECK-NEXT: .reg .b32 %r<16>; +; CHECK-NEXT: .reg .b64 %rd<3>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.b8 %rs1, [tcgen05_mma_i8_shared_disable_output_lane_param_5]; +; CHECK-NEXT: and.b16 %rs2, %rs1, 1; +; CHECK-NEXT: setp.ne.b16 %p1, %rs2, 0; +; CHECK-NEXT: ld.param.b32 %r1, [tcgen05_mma_i8_shared_disable_output_lane_param_0]; +; CHECK-NEXT: ld.param.b64 %rd1, [tcgen05_mma_i8_shared_disable_output_lane_param_2]; +; CHECK-NEXT: ld.param.b64 %rd2, [tcgen05_mma_i8_shared_disable_output_lane_param_3]; +; CHECK-NEXT: ld.param.b32 %r2, [tcgen05_mma_i8_shared_disable_output_lane_param_4]; +; CHECK-NEXT: ld.param.v4.b32 {%r3, %r4, %r5, %r6}, [tcgen05_mma_i8_shared_disable_output_lane_param_6]; +; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::i8.collector::a::discard [%r1], %rd1, %rd2, %r2, {%r3, %r4, %r5, %r6}, %p1; +; CHECK-NEXT: ld.param.b32 %r7, [tcgen05_mma_i8_shared_disable_output_lane_param_1]; +; CHECK-NEXT: ld.param.v4.b32 {%r8, %r9, %r10, %r11}, [tcgen05_mma_i8_shared_disable_output_lane_param_7+16]; +; CHECK-NEXT: ld.param.v4.b32 {%r12, %r13, %r14, %r15}, [tcgen05_mma_i8_shared_disable_output_lane_param_7]; +; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::i8.collector::a::discard [%r1], %rd1, %rd2, %r2, {%r12, %r13, %r14, 
%r15, %r8, %r9, %r10, %r11}, %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::i8.collector::a::discard [%r1], [%r7], %rd2, %r2, {%r3, %r4, %r5, %r6}, %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::i8.collector::a::discard [%r1], [%r7], %rd2, %r2, {%r12, %r13, %r14, %r15, %r8, %r9, %r10, %r11}, %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::i8.ashift.collector::a::discard [%r1], [%r7], %rd2, %r2, {%r3, %r4, %r5, %r6}, %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::i8.ashift.collector::a::discard [%r1], [%r7], %rd2, %r2, {%r12, %r13, %r14, %r15, %r8, %r9, %r10, %r11}, %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::i8.collector::a::lastuse [%r1], %rd1, %rd2, %r2, {%r3, %r4, %r5, %r6}, %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::i8.collector::a::lastuse [%r1], %rd1, %rd2, %r2, {%r12, %r13, %r14, %r15, %r8, %r9, %r10, %r11}, %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::i8.collector::a::lastuse [%r1], [%r7], %rd2, %r2, {%r3, %r4, %r5, %r6}, %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::i8.collector::a::lastuse [%r1], [%r7], %rd2, %r2, {%r12, %r13, %r14, %r15, %r8, %r9, %r10, %r11}, %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::i8.ashift.collector::a::lastuse [%r1], [%r7], %rd2, %r2, {%r3, %r4, %r5, %r6}, %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::i8.ashift.collector::a::lastuse [%r1], [%r7], %rd2, %r2, {%r12, %r13, %r14, %r15, %r8, %r9, %r10, %r11}, %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::i8.collector::a::fill [%r1], %rd1, %rd2, %r2, {%r3, %r4, %r5, %r6}, %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::i8.collector::a::fill [%r1], %rd1, %rd2, %r2, {%r12, %r13, %r14, %r15, %r8, %r9, %r10, %r11}, %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::i8.collector::a::fill [%r1], [%r7], %rd2, %r2, {%r3, %r4, %r5, %r6}, %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::i8.collector::a::fill [%r1], [%r7], %rd2, %r2, {%r12, %r13, %r14, %r15, %r8, %r9, %r10, %r11}, %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::i8.collector::a::use [%r1], %rd1, %rd2, %r2, {%r3, %r4, %r5, %r6}, %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::i8.collector::a::use [%r1], %rd1, %rd2, %r2, {%r12, %r13, %r14, %r15, %r8, %r9, %r10, %r11}, %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::i8.collector::a::use [%r1], [%r7], %rd2, %r2, {%r3, %r4, %r5, %r6}, %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::i8.collector::a::use [%r1], [%r7], %rd2, %r2, {%r12, %r13, %r14, %r15, %r8, %r9, %r10, %r11}, %p1; +; CHECK-NEXT: ret; + call void @llvm.nvvm.tcgen05.mma.shared.disable_output_lane.cg1(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, <4 x i32> %disable_output_lanev4, i32 3, i32 0) + + call void @llvm.nvvm.tcgen05.mma.shared.disable_output_lane.cg2(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, <8 x i32> %disable_output_lanev8, i32 3, i32 0) + + call void @llvm.nvvm.tcgen05.mma.tensor.disable_output_lane.cg1(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, <4 x i32> %disable_output_lanev4, i32 3, i32 0) + + call void @llvm.nvvm.tcgen05.mma.tensor.disable_output_lane.cg2(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, <8 x i32> %disable_output_lanev8, i32 3, i32 0) + + call void @llvm.nvvm.tcgen05.mma.tensor.disable_output_lane.cg1.ashift(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, <4 x i32> %disable_output_lanev4, i32 3, i32 0) + + call void 
@llvm.nvvm.tcgen05.mma.tensor.disable_output_lane.cg2.ashift(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, <8 x i32> %disable_output_lanev8, i32 3, i32 0) + + call void @llvm.nvvm.tcgen05.mma.shared.disable_output_lane.cg1(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, <4 x i32> %disable_output_lanev4, i32 3, i32 1) + + call void @llvm.nvvm.tcgen05.mma.shared.disable_output_lane.cg2(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, <8 x i32> %disable_output_lanev8, i32 3, i32 1) + + call void @llvm.nvvm.tcgen05.mma.tensor.disable_output_lane.cg1(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, <4 x i32> %disable_output_lanev4, i32 3, i32 1) + + call void @llvm.nvvm.tcgen05.mma.tensor.disable_output_lane.cg2(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, <8 x i32> %disable_output_lanev8, i32 3, i32 1) + + call void @llvm.nvvm.tcgen05.mma.tensor.disable_output_lane.cg1.ashift(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, <4 x i32> %disable_output_lanev4, i32 3, i32 1) + + call void @llvm.nvvm.tcgen05.mma.tensor.disable_output_lane.cg2.ashift(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, <8 x i32> %disable_output_lanev8, i32 3, i32 1) + + call void @llvm.nvvm.tcgen05.mma.shared.disable_output_lane.cg1(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, <4 x i32> %disable_output_lanev4, i32 3, i32 2) + + call void @llvm.nvvm.tcgen05.mma.shared.disable_output_lane.cg2(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, <8 x i32> %disable_output_lanev8, i32 3, i32 2) + + call void @llvm.nvvm.tcgen05.mma.tensor.disable_output_lane.cg1(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, <4 x i32> %disable_output_lanev4, i32 3, i32 2) + + call void @llvm.nvvm.tcgen05.mma.tensor.disable_output_lane.cg2(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, <8 x i32> %disable_output_lanev8, i32 3, i32 2) + + call void @llvm.nvvm.tcgen05.mma.shared.disable_output_lane.cg1(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, <4 x i32> %disable_output_lanev4, i32 3, i32 3) + + call void @llvm.nvvm.tcgen05.mma.shared.disable_output_lane.cg2(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, <8 x i32> %disable_output_lanev8, i32 3, i32 3) + + call void @llvm.nvvm.tcgen05.mma.tensor.disable_output_lane.cg1(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, <4 x i32> %disable_output_lanev4, i32 3, i32 3) + + call void @llvm.nvvm.tcgen05.mma.tensor.disable_output_lane.cg2(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, <8 x i32> %disable_output_lanev8, i32 3, i32 3) + + ret void +} + +define void @tcgen05_mma_sp_i8_shared_disable_output_lane(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, <4 x i32> %disable_output_lanev4, <8 x i32> %disable_output_lanev8, ptr addrspace(6) %spmetadata) { +; CHECK-LABEL: tcgen05_mma_sp_i8_shared_disable_output_lane( +; CHECK: { +; CHECK-NEXT: .reg .pred %p<2>; +; CHECK-NEXT: .reg .b16 %rs<3>; +; CHECK-NEXT: .reg .b32 %r<17>; +; CHECK-NEXT: .reg .b64 %rd<3>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; 
CHECK-NEXT: ld.param.b8 %rs1, [tcgen05_mma_sp_i8_shared_disable_output_lane_param_5]; +; CHECK-NEXT: and.b16 %rs2, %rs1, 1; +; CHECK-NEXT: setp.ne.b16 %p1, %rs2, 0; +; CHECK-NEXT: ld.param.b32 %r1, [tcgen05_mma_sp_i8_shared_disable_output_lane_param_0]; +; CHECK-NEXT: ld.param.b64 %rd1, [tcgen05_mma_sp_i8_shared_disable_output_lane_param_2]; +; CHECK-NEXT: ld.param.b64 %rd2, [tcgen05_mma_sp_i8_shared_disable_output_lane_param_3]; +; CHECK-NEXT: ld.param.b32 %r2, [tcgen05_mma_sp_i8_shared_disable_output_lane_param_4]; +; CHECK-NEXT: ld.param.v4.b32 {%r3, %r4, %r5, %r6}, [tcgen05_mma_sp_i8_shared_disable_output_lane_param_6]; +; CHECK-NEXT: ld.param.b32 %r7, [tcgen05_mma_sp_i8_shared_disable_output_lane_param_8]; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::1.kind::i8.collector::a::discard [%r1], %rd1, %rd2, [%r7], %r2, {%r3, %r4, %r5, %r6}, %p1; +; CHECK-NEXT: ld.param.b32 %r8, [tcgen05_mma_sp_i8_shared_disable_output_lane_param_1]; +; CHECK-NEXT: ld.param.v4.b32 {%r9, %r10, %r11, %r12}, [tcgen05_mma_sp_i8_shared_disable_output_lane_param_7+16]; +; CHECK-NEXT: ld.param.v4.b32 {%r13, %r14, %r15, %r16}, [tcgen05_mma_sp_i8_shared_disable_output_lane_param_7]; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::2.kind::i8.collector::a::discard [%r1], %rd1, %rd2, [%r7], %r2, {%r13, %r14, %r15, %r16, %r9, %r10, %r11, %r12}, %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::1.kind::i8.collector::a::discard [%r1], [%r8], %rd2, [%r7], %r2, {%r3, %r4, %r5, %r6}, %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::2.kind::i8.collector::a::discard [%r1], [%r8], %rd2, [%r7], %r2, {%r13, %r14, %r15, %r16, %r9, %r10, %r11, %r12}, %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::1.kind::i8.ashift.collector::a::discard [%r1], [%r8], %rd2, [%r7], %r2, {%r3, %r4, %r5, %r6}, %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::2.kind::i8.ashift.collector::a::discard [%r1], [%r8], %rd2, [%r7], %r2, {%r13, %r14, %r15, %r16, %r9, %r10, %r11, %r12}, %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::1.kind::i8.collector::a::lastuse [%r1], %rd1, %rd2, [%r7], %r2, {%r3, %r4, %r5, %r6}, %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::2.kind::i8.collector::a::lastuse [%r1], %rd1, %rd2, [%r7], %r2, {%r13, %r14, %r15, %r16, %r9, %r10, %r11, %r12}, %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::1.kind::i8.collector::a::lastuse [%r1], [%r8], %rd2, [%r7], %r2, {%r3, %r4, %r5, %r6}, %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::2.kind::i8.collector::a::lastuse [%r1], [%r8], %rd2, [%r7], %r2, {%r13, %r14, %r15, %r16, %r9, %r10, %r11, %r12}, %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::1.kind::i8.ashift.collector::a::lastuse [%r1], [%r8], %rd2, [%r7], %r2, {%r3, %r4, %r5, %r6}, %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::2.kind::i8.ashift.collector::a::lastuse [%r1], [%r8], %rd2, [%r7], %r2, {%r13, %r14, %r15, %r16, %r9, %r10, %r11, %r12}, %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::1.kind::i8.collector::a::fill [%r1], %rd1, %rd2, [%r7], %r2, {%r3, %r4, %r5, %r6}, %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::2.kind::i8.collector::a::fill [%r1], %rd1, %rd2, [%r7], %r2, {%r13, %r14, %r15, %r16, %r9, %r10, %r11, %r12}, %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::1.kind::i8.collector::a::fill [%r1], [%r8], %rd2, [%r7], %r2, {%r3, %r4, %r5, %r6}, %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::2.kind::i8.collector::a::fill [%r1], [%r8], %rd2, [%r7], %r2, {%r13, %r14, %r15, %r16, %r9, %r10, %r11, %r12}, %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::1.kind::i8.collector::a::use [%r1], %rd1, %rd2, [%r7], %r2, {%r3, %r4, %r5, %r6}, %p1; +; CHECK-NEXT: 
tcgen05.mma.sp.cta_group::2.kind::i8.collector::a::use [%r1], %rd1, %rd2, [%r7], %r2, {%r13, %r14, %r15, %r16, %r9, %r10, %r11, %r12}, %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::1.kind::i8.collector::a::use [%r1], [%r8], %rd2, [%r7], %r2, {%r3, %r4, %r5, %r6}, %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::2.kind::i8.collector::a::use [%r1], [%r8], %rd2, [%r7], %r2, {%r13, %r14, %r15, %r16, %r9, %r10, %r11, %r12}, %p1; +; CHECK-NEXT: ret; + call void @llvm.nvvm.tcgen05.mma.sp.shared.disable_output_lane.cg1(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, <4 x i32> %disable_output_lanev4, i32 3, i32 0) + + call void @llvm.nvvm.tcgen05.mma.sp.shared.disable_output_lane.cg2(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, <8 x i32> %disable_output_lanev8, i32 3, i32 0) + + call void @llvm.nvvm.tcgen05.mma.sp.tensor.disable_output_lane.cg1(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, <4 x i32> %disable_output_lanev4, i32 3, i32 0) + + call void @llvm.nvvm.tcgen05.mma.sp.tensor.disable_output_lane.cg2(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, <8 x i32> %disable_output_lanev8, i32 3, i32 0) + + call void @llvm.nvvm.tcgen05.mma.sp.tensor.disable_output_lane.cg1.ashift(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, <4 x i32> %disable_output_lanev4, i32 3, i32 0) + + call void @llvm.nvvm.tcgen05.mma.sp.tensor.disable_output_lane.cg2.ashift(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, <8 x i32> %disable_output_lanev8, i32 3, i32 0) + + call void @llvm.nvvm.tcgen05.mma.sp.shared.disable_output_lane.cg1(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, <4 x i32> %disable_output_lanev4, i32 3, i32 1) + + call void @llvm.nvvm.tcgen05.mma.sp.shared.disable_output_lane.cg2(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, <8 x i32> %disable_output_lanev8, i32 3, i32 1) + + call void @llvm.nvvm.tcgen05.mma.sp.tensor.disable_output_lane.cg1(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, <4 x i32> %disable_output_lanev4, i32 3, i32 1) + + call void @llvm.nvvm.tcgen05.mma.sp.tensor.disable_output_lane.cg2(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, <8 x i32> %disable_output_lanev8, i32 3, i32 1) + + call void @llvm.nvvm.tcgen05.mma.sp.tensor.disable_output_lane.cg1.ashift(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, <4 x i32> %disable_output_lanev4, i32 3, i32 1) + + call void @llvm.nvvm.tcgen05.mma.sp.tensor.disable_output_lane.cg2.ashift(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, <8 x i32> %disable_output_lanev8, i32 3, i32 1) + + call void @llvm.nvvm.tcgen05.mma.sp.shared.disable_output_lane.cg1(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, <4 x i32> %disable_output_lanev4, i32 3, i32 2) + + call void 
@llvm.nvvm.tcgen05.mma.sp.shared.disable_output_lane.cg2(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, <8 x i32> %disable_output_lanev8, i32 3, i32 2) + + call void @llvm.nvvm.tcgen05.mma.sp.tensor.disable_output_lane.cg1(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, <4 x i32> %disable_output_lanev4, i32 3, i32 2) + + call void @llvm.nvvm.tcgen05.mma.sp.tensor.disable_output_lane.cg2(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, <8 x i32> %disable_output_lanev8, i32 3, i32 2) + + call void @llvm.nvvm.tcgen05.mma.sp.shared.disable_output_lane.cg1(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, <4 x i32> %disable_output_lanev4, i32 3, i32 3) + + call void @llvm.nvvm.tcgen05.mma.sp.shared.disable_output_lane.cg2(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, <8 x i32> %disable_output_lanev8, i32 3, i32 3) + + call void @llvm.nvvm.tcgen05.mma.sp.tensor.disable_output_lane.cg1(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, <4 x i32> %disable_output_lanev4, i32 3, i32 3) + + call void @llvm.nvvm.tcgen05.mma.sp.tensor.disable_output_lane.cg2(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, <8 x i32> %disable_output_lanev8, i32 3, i32 3) + + ret void +} diff --git a/llvm/test/CodeGen/NVPTX/tcgen05-mma-scale-d.ll b/llvm/test/CodeGen/NVPTX/tcgen05-mma-scale-d.ll new file mode 100644 index 0000000000000..2b9019d09cc79 --- /dev/null +++ b/llvm/test/CodeGen/NVPTX/tcgen05-mma-scale-d.ll @@ -0,0 +1,412 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 +; RUN: llc < %s -o - -mcpu=sm_100a -march=nvptx64 -mattr=+ptx86 | FileCheck %s +; RUN: llc < %s -o - -mcpu=sm_101a -march=nvptx64 -mattr=+ptx86 | FileCheck %s +; RUN: %if ptxas-12.8 %{ llc < %s -march=nvptx64 -mattr=+ptx86 -mcpu=sm_100a | %ptxas-verify -arch=sm_100a %} +; RUN: %if ptxas-12.8 %{ llc < %s -march=nvptx64 -mattr=+ptx86 -mcpu=sm_101a | %ptxas-verify -arch=sm_101a %} + +define void @tcgen05_mma_fp16(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d) { +; CHECK-LABEL: tcgen05_mma_fp16( +; CHECK: { +; CHECK-NEXT: .reg .pred %p<2>; +; CHECK-NEXT: .reg .b16 %rs<3>; +; CHECK-NEXT: .reg .b32 %r<4>; +; CHECK-NEXT: .reg .b64 %rd<3>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.b8 %rs1, [tcgen05_mma_fp16_param_5]; +; CHECK-NEXT: and.b16 %rs2, %rs1, 1; +; CHECK-NEXT: setp.ne.b16 %p1, %rs2, 0; +; CHECK-NEXT: ld.param.b32 %r1, [tcgen05_mma_fp16_param_0]; +; CHECK-NEXT: ld.param.b64 %rd1, [tcgen05_mma_fp16_param_2]; +; CHECK-NEXT: ld.param.b64 %rd2, [tcgen05_mma_fp16_param_3]; +; CHECK-NEXT: ld.param.b32 %r2, [tcgen05_mma_fp16_param_4]; +; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::f16.collector::a::discard [%r1], %rd1, %rd2, %r2, %p1, 0; +; CHECK-NEXT: ld.param.b32 %r3, [tcgen05_mma_fp16_param_1]; +; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::f16.collector::a::discard [%r1], %rd1, %rd2, %r2, %p1, 0; +; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::f16.collector::a::discard [%r1], [%r3], %rd2, %r2, %p1, 0; +; CHECK-NEXT: 
tcgen05.mma.cta_group::2.kind::f16.collector::a::discard [%r1], [%r3], %rd2, %r2, %p1, 0; +; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::f16.collector::a::discard.ashift [%r1], [%r3], %rd2, %r2, %p1, 0; +; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::f16.collector::a::lastuse.ashift [%r1], [%r3], %rd2, %r2, %p1, 0; +; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::f16.collector::a::lastuse [%r1], %rd1, %rd2, %r2, %p1, 0; +; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::f16.collector::a::discard.ashift [%r1], [%r3], %rd2, %r2, %p1, 0; +; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::f16.collector::a::lastuse.ashift [%r1], [%r3], %rd2, %r2, %p1, 0; +; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::f16.collector::a::lastuse [%r1], %rd1, %rd2, %r2, %p1, 0; +; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::f16.collector::a::lastuse [%r1], [%r3], %rd2, %r2, %p1, 0; +; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::f16.collector::a::lastuse [%r1], [%r3], %rd2, %r2, %p1, 0; +; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::f16.collector::a::fill [%r1], %rd1, %rd2, %r2, %p1, 0; +; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::f16.collector::a::fill [%r1], %rd1, %rd2, %r2, %p1, 0; +; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::f16.collector::a::fill [%r1], [%r3], %rd2, %r2, %p1, 0; +; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::f16.collector::a::fill [%r1], [%r3], %rd2, %r2, %p1, 0; +; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::f16.collector::a::use [%r1], %rd1, %rd2, %r2, %p1, 0; +; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::f16.collector::a::use [%r1], %rd1, %rd2, %r2, %p1, 0; +; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::f16.collector::a::use [%r1], [%r3], %rd2, %r2, %p1, 0; +; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::f16.collector::a::use [%r1], [%r3], %rd2, %r2, %p1, 0; +; CHECK-NEXT: ret; + call void @llvm.nvvm.tcgen05.mma.shared.f16.scale_d(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, i64 0, i32 1, i32 0) + + call void @llvm.nvvm.tcgen05.mma.shared.f16.scale_d(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, i64 0, i32 2, i32 0) + + call void @llvm.nvvm.tcgen05.mma.tensor.f16.scale_d(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, i64 0, i32 1, i32 0) + + call void @llvm.nvvm.tcgen05.mma.tensor.f16.scale_d(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, i64 0, i32 2, i32 0) + + call void @llvm.nvvm.tcgen05.mma.tensor.f16.scale_d.ashift(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, i64 0, i32 1, i32 0) + + call void @llvm.nvvm.tcgen05.mma.tensor.f16.scale_d.ashift(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, i64 0, i32 1, i32 1) + + call void @llvm.nvvm.tcgen05.mma.shared.f16.scale_d(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, i64 0, i32 1, i32 1) + + call void @llvm.nvvm.tcgen05.mma.tensor.f16.scale_d.ashift(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, i64 0, i32 2, i32 0) + + call void @llvm.nvvm.tcgen05.mma.tensor.f16.scale_d.ashift(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, i64 0, i32 2, i32 1) + + call void @llvm.nvvm.tcgen05.mma.shared.f16.scale_d(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, i64 0, i32 2, i32 1) + + call void @llvm.nvvm.tcgen05.mma.tensor.f16.scale_d(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 
%idesc, i1 %enable_inp_d, i64 0, i32 1, i32 1) + + call void @llvm.nvvm.tcgen05.mma.tensor.f16.scale_d(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, i64 0, i32 2, i32 1) + + call void @llvm.nvvm.tcgen05.mma.shared.f16.scale_d(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, i64 0, i32 1, i32 2) + + call void @llvm.nvvm.tcgen05.mma.shared.f16.scale_d(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, i64 0, i32 2, i32 2) + + call void @llvm.nvvm.tcgen05.mma.tensor.f16.scale_d(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, i64 0, i32 1, i32 2) + + call void @llvm.nvvm.tcgen05.mma.tensor.f16.scale_d(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, i64 0, i32 2, i32 2) + + call void @llvm.nvvm.tcgen05.mma.shared.f16.scale_d(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, i64 0, i32 1, i32 3) + + call void @llvm.nvvm.tcgen05.mma.shared.f16.scale_d(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, i64 0, i32 2, i32 3) + + call void @llvm.nvvm.tcgen05.mma.tensor.f16.scale_d(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, i64 0, i32 1, i32 3) + + call void @llvm.nvvm.tcgen05.mma.tensor.f16.scale_d(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, i64 0, i32 2, i32 3) + ret void +} + +define void @tcgen05_mma_sp_fp16(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata) { +; CHECK-LABEL: tcgen05_mma_sp_fp16( +; CHECK: { +; CHECK-NEXT: .reg .pred %p<2>; +; CHECK-NEXT: .reg .b16 %rs<3>; +; CHECK-NEXT: .reg .b32 %r<5>; +; CHECK-NEXT: .reg .b64 %rd<3>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.b8 %rs1, [tcgen05_mma_sp_fp16_param_5]; +; CHECK-NEXT: and.b16 %rs2, %rs1, 1; +; CHECK-NEXT: setp.ne.b16 %p1, %rs2, 0; +; CHECK-NEXT: ld.param.b32 %r1, [tcgen05_mma_sp_fp16_param_0]; +; CHECK-NEXT: ld.param.b64 %rd1, [tcgen05_mma_sp_fp16_param_2]; +; CHECK-NEXT: ld.param.b64 %rd2, [tcgen05_mma_sp_fp16_param_3]; +; CHECK-NEXT: ld.param.b32 %r2, [tcgen05_mma_sp_fp16_param_4]; +; CHECK-NEXT: ld.param.b32 %r3, [tcgen05_mma_sp_fp16_param_6]; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::1.kind::f16.collector::a::discard [%r1], %rd1, %rd2, [%r3], %r2, %p1, 0; +; CHECK-NEXT: ld.param.b32 %r4, [tcgen05_mma_sp_fp16_param_1]; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::2.kind::f16.collector::a::discard [%r1], %rd1, %rd2, [%r3], %r2, %p1, 0; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::1.kind::f16.collector::a::discard [%r1], [%r4], %rd2, [%r3], %r2, %p1, 0; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::2.kind::f16.collector::a::discard [%r1], [%r4], %rd2, [%r3], %r2, %p1, 0; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::1.kind::f16.collector::a::discard.ashift [%r1], [%r4], %rd2, [%r3], %r2, %p1, 0; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::1.kind::f16.collector::a::lastuse.ashift [%r1], [%r4], %rd2, [%r3], %r2, %p1, 0; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::1.kind::f16.collector::a::lastuse [%r1], %rd1, %rd2, [%r3], %r2, %p1, 0; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::2.kind::f16.collector::a::discard.ashift [%r1], [%r4], %rd2, [%r3], %r2, %p1, 0; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::2.kind::f16.collector::a::lastuse.ashift [%r1], [%r4], %rd2, [%r3], %r2, %p1, 0; +; CHECK-NEXT: 
tcgen05.mma.sp.cta_group::2.kind::f16.collector::a::lastuse [%r1], %rd1, %rd2, [%r3], %r2, %p1, 0; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::1.kind::f16.collector::a::lastuse [%r1], [%r4], %rd2, [%r3], %r2, %p1, 0; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::2.kind::f16.collector::a::lastuse [%r1], [%r4], %rd2, [%r3], %r2, %p1, 0; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::1.kind::f16.collector::a::fill [%r1], %rd1, %rd2, [%r3], %r2, %p1, 0; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::2.kind::f16.collector::a::fill [%r1], %rd1, %rd2, [%r3], %r2, %p1, 0; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::1.kind::f16.collector::a::fill [%r1], [%r4], %rd2, [%r3], %r2, %p1, 0; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::2.kind::f16.collector::a::fill [%r1], [%r4], %rd2, [%r3], %r2, %p1, 0; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::1.kind::f16.collector::a::use [%r1], %rd1, %rd2, [%r3], %r2, %p1, 0; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::2.kind::f16.collector::a::use [%r1], %rd1, %rd2, [%r3], %r2, %p1, 0; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::1.kind::f16.collector::a::use [%r1], [%r4], %rd2, [%r3], %r2, %p1, 0; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::2.kind::f16.collector::a::use [%r1], [%r4], %rd2, [%r3], %r2, %p1, 0; +; CHECK-NEXT: ret; + call void @llvm.nvvm.tcgen05.mma.sp.shared.f16.scale_d(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, i64 0, i32 1, i32 0) + + call void @llvm.nvvm.tcgen05.mma.sp.shared.f16.scale_d(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, i64 0, i32 2, i32 0) + + call void @llvm.nvvm.tcgen05.mma.sp.tensor.f16.scale_d(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, i64 0, i32 1, i32 0) + + call void @llvm.nvvm.tcgen05.mma.sp.tensor.f16.scale_d(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, i64 0, i32 2, i32 0) + + call void @llvm.nvvm.tcgen05.mma.sp.tensor.f16.scale_d.ashift(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, i64 0, i32 1, i32 0) + + call void @llvm.nvvm.tcgen05.mma.sp.tensor.f16.scale_d.ashift(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, i64 0, i32 1, i32 1) + + call void @llvm.nvvm.tcgen05.mma.sp.shared.f16.scale_d(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, i64 0, i32 1, i32 1) + + call void @llvm.nvvm.tcgen05.mma.sp.tensor.f16.scale_d.ashift(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, i64 0, i32 2, i32 0) + + call void @llvm.nvvm.tcgen05.mma.sp.tensor.f16.scale_d.ashift(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, i64 0, i32 2, i32 1) + + call void @llvm.nvvm.tcgen05.mma.sp.shared.f16.scale_d(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, i64 0, i32 2, i32 1) + + call void @llvm.nvvm.tcgen05.mma.sp.tensor.f16.scale_d(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, i64 0, i32 1, i32 1) + + call void @llvm.nvvm.tcgen05.mma.sp.tensor.f16.scale_d(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 
%enable_inp_d, ptr addrspace(6) %spmetadata, i64 0, i32 2, i32 1) + + call void @llvm.nvvm.tcgen05.mma.sp.shared.f16.scale_d(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, i64 0, i32 1, i32 2) + + call void @llvm.nvvm.tcgen05.mma.sp.shared.f16.scale_d(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, i64 0, i32 2, i32 2) + + call void @llvm.nvvm.tcgen05.mma.sp.tensor.f16.scale_d(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, i64 0, i32 1, i32 2) + + call void @llvm.nvvm.tcgen05.mma.sp.tensor.f16.scale_d(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, i64 0, i32 2, i32 2) + + call void @llvm.nvvm.tcgen05.mma.sp.shared.f16.scale_d(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, i64 0, i32 1, i32 3) + + call void @llvm.nvvm.tcgen05.mma.sp.shared.f16.scale_d(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, i64 0, i32 2, i32 3) + + call void @llvm.nvvm.tcgen05.mma.sp.tensor.f16.scale_d(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, i64 0, i32 1, i32 3) + + call void @llvm.nvvm.tcgen05.mma.sp.tensor.f16.scale_d(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, i64 0, i32 2, i32 3) + ret void +} + +define void @tcgen05_mma_tf32(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d) { +; CHECK-LABEL: tcgen05_mma_tf32( +; CHECK: { +; CHECK-NEXT: .reg .pred %p<2>; +; CHECK-NEXT: .reg .b16 %rs<3>; +; CHECK-NEXT: .reg .b32 %r<4>; +; CHECK-NEXT: .reg .b64 %rd<3>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.b8 %rs1, [tcgen05_mma_tf32_param_5]; +; CHECK-NEXT: and.b16 %rs2, %rs1, 1; +; CHECK-NEXT: setp.ne.b16 %p1, %rs2, 0; +; CHECK-NEXT: ld.param.b32 %r1, [tcgen05_mma_tf32_param_0]; +; CHECK-NEXT: ld.param.b64 %rd1, [tcgen05_mma_tf32_param_2]; +; CHECK-NEXT: ld.param.b64 %rd2, [tcgen05_mma_tf32_param_3]; +; CHECK-NEXT: ld.param.b32 %r2, [tcgen05_mma_tf32_param_4]; +; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::tf32.collector::a::discard [%r1], %rd1, %rd2, %r2, %p1, 0; +; CHECK-NEXT: ld.param.b32 %r3, [tcgen05_mma_tf32_param_1]; +; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::tf32.collector::a::discard [%r1], %rd1, %rd2, %r2, %p1, 0; +; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::tf32.collector::a::discard [%r1], [%r3], %rd2, %r2, %p1, 0; +; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::tf32.collector::a::discard [%r1], [%r3], %rd2, %r2, %p1, 0; +; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::tf32.collector::a::discard.ashift [%r1], [%r3], %rd2, %r2, %p1, 0; +; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::tf32.collector::a::lastuse.ashift [%r1], [%r3], %rd2, %r2, %p1, 0; +; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::tf32.collector::a::lastuse [%r1], %rd1, %rd2, %r2, %p1, 0; +; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::tf32.collector::a::discard.ashift [%r1], [%r3], %rd2, %r2, %p1, 0; +; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::tf32.collector::a::lastuse.ashift [%r1], [%r3], %rd2, %r2, %p1, 0; +; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::tf32.collector::a::lastuse [%r1], %rd1, %rd2, %r2, %p1, 0; +; CHECK-NEXT: 
tcgen05.mma.cta_group::1.kind::tf32.collector::a::lastuse [%r1], [%r3], %rd2, %r2, %p1, 0; +; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::tf32.collector::a::lastuse [%r1], [%r3], %rd2, %r2, %p1, 0; +; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::tf32.collector::a::fill [%r1], %rd1, %rd2, %r2, %p1, 0; +; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::tf32.collector::a::fill [%r1], %rd1, %rd2, %r2, %p1, 0; +; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::tf32.collector::a::fill [%r1], [%r3], %rd2, %r2, %p1, 0; +; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::tf32.collector::a::fill [%r1], [%r3], %rd2, %r2, %p1, 0; +; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::tf32.collector::a::use [%r1], %rd1, %rd2, %r2, %p1, 0; +; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::tf32.collector::a::use [%r1], %rd1, %rd2, %r2, %p1, 0; +; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::tf32.collector::a::use [%r1], [%r3], %rd2, %r2, %p1, 0; +; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::tf32.collector::a::use [%r1], [%r3], %rd2, %r2, %p1, 0; +; CHECK-NEXT: ret; + call void @llvm.nvvm.tcgen05.mma.shared.tf32.scale_d(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, i64 0, i32 1, i32 0) + + call void @llvm.nvvm.tcgen05.mma.shared.tf32.scale_d(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, i64 0, i32 2, i32 0) + + call void @llvm.nvvm.tcgen05.mma.tensor.tf32.scale_d(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, i64 0, i32 1, i32 0) + + call void @llvm.nvvm.tcgen05.mma.tensor.tf32.scale_d(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, i64 0, i32 2, i32 0) + + call void @llvm.nvvm.tcgen05.mma.tensor.tf32.scale_d.ashift(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, i64 0, i32 1, i32 0) + + call void @llvm.nvvm.tcgen05.mma.tensor.tf32.scale_d.ashift(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, i64 0, i32 1, i32 1) + + call void @llvm.nvvm.tcgen05.mma.shared.tf32.scale_d(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, i64 0, i32 1, i32 1) + + call void @llvm.nvvm.tcgen05.mma.tensor.tf32.scale_d.ashift(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, i64 0, i32 2, i32 0) + + call void @llvm.nvvm.tcgen05.mma.tensor.tf32.scale_d.ashift(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, i64 0, i32 2, i32 1) + + call void @llvm.nvvm.tcgen05.mma.shared.tf32.scale_d(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, i64 0, i32 2, i32 1) + + call void @llvm.nvvm.tcgen05.mma.tensor.tf32.scale_d(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, i64 0, i32 1, i32 1) + + call void @llvm.nvvm.tcgen05.mma.tensor.tf32.scale_d(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, i64 0, i32 2, i32 1) + + call void @llvm.nvvm.tcgen05.mma.shared.tf32.scale_d(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, i64 0, i32 1, i32 2) + + call void @llvm.nvvm.tcgen05.mma.shared.tf32.scale_d(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, i64 0, i32 2, i32 2) + + call void @llvm.nvvm.tcgen05.mma.tensor.tf32.scale_d(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, i64 0, i32 1, i32 2) + + call void 
@llvm.nvvm.tcgen05.mma.tensor.tf32.scale_d(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, i64 0, i32 2, i32 2) + + call void @llvm.nvvm.tcgen05.mma.shared.tf32.scale_d(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, i64 0, i32 1, i32 3) + + call void @llvm.nvvm.tcgen05.mma.shared.tf32.scale_d(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, i64 0, i32 2, i32 3) + + call void @llvm.nvvm.tcgen05.mma.tensor.tf32.scale_d(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, i64 0, i32 1, i32 3) + + call void @llvm.nvvm.tcgen05.mma.tensor.tf32.scale_d(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, i64 0, i32 2, i32 3) + ret void +} + +define void @tcgen05_mma_sp_tf32(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata) { +; CHECK-LABEL: tcgen05_mma_sp_tf32( +; CHECK: { +; CHECK-NEXT: .reg .pred %p<2>; +; CHECK-NEXT: .reg .b16 %rs<3>; +; CHECK-NEXT: .reg .b32 %r<5>; +; CHECK-NEXT: .reg .b64 %rd<3>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.b8 %rs1, [tcgen05_mma_sp_tf32_param_5]; +; CHECK-NEXT: and.b16 %rs2, %rs1, 1; +; CHECK-NEXT: setp.ne.b16 %p1, %rs2, 0; +; CHECK-NEXT: ld.param.b32 %r1, [tcgen05_mma_sp_tf32_param_0]; +; CHECK-NEXT: ld.param.b64 %rd1, [tcgen05_mma_sp_tf32_param_2]; +; CHECK-NEXT: ld.param.b64 %rd2, [tcgen05_mma_sp_tf32_param_3]; +; CHECK-NEXT: ld.param.b32 %r2, [tcgen05_mma_sp_tf32_param_4]; +; CHECK-NEXT: ld.param.b32 %r3, [tcgen05_mma_sp_tf32_param_6]; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::1.kind::tf32.collector::a::discard [%r1], %rd1, %rd2, [%r3], %r2, %p1, 0; +; CHECK-NEXT: ld.param.b32 %r4, [tcgen05_mma_sp_tf32_param_1]; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::2.kind::tf32.collector::a::discard [%r1], %rd1, %rd2, [%r3], %r2, %p1, 0; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::1.kind::tf32.collector::a::discard [%r1], [%r4], %rd2, [%r3], %r2, %p1, 0; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::2.kind::tf32.collector::a::discard [%r1], [%r4], %rd2, [%r3], %r2, %p1, 0; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::1.kind::tf32.collector::a::discard.ashift [%r1], [%r4], %rd2, [%r3], %r2, %p1, 0; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::1.kind::tf32.collector::a::lastuse.ashift [%r1], [%r4], %rd2, [%r3], %r2, %p1, 0; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::1.kind::tf32.collector::a::lastuse [%r1], %rd1, %rd2, [%r3], %r2, %p1, 0; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::2.kind::tf32.collector::a::discard.ashift [%r1], [%r4], %rd2, [%r3], %r2, %p1, 0; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::2.kind::tf32.collector::a::lastuse.ashift [%r1], [%r4], %rd2, [%r3], %r2, %p1, 0; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::2.kind::tf32.collector::a::lastuse [%r1], %rd1, %rd2, [%r3], %r2, %p1, 0; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::1.kind::tf32.collector::a::lastuse [%r1], [%r4], %rd2, [%r3], %r2, %p1, 0; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::2.kind::tf32.collector::a::lastuse [%r1], [%r4], %rd2, [%r3], %r2, %p1, 0; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::1.kind::tf32.collector::a::fill [%r1], %rd1, %rd2, [%r3], %r2, %p1, 0; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::2.kind::tf32.collector::a::fill [%r1], %rd1, %rd2, [%r3], %r2, %p1, 0; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::1.kind::tf32.collector::a::fill [%r1], [%r4], %rd2, [%r3], %r2, %p1, 0; +; CHECK-NEXT: 
tcgen05.mma.sp.cta_group::2.kind::tf32.collector::a::fill [%r1], [%r4], %rd2, [%r3], %r2, %p1, 0; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::1.kind::tf32.collector::a::use [%r1], %rd1, %rd2, [%r3], %r2, %p1, 0; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::2.kind::tf32.collector::a::use [%r1], %rd1, %rd2, [%r3], %r2, %p1, 0; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::1.kind::tf32.collector::a::use [%r1], [%r4], %rd2, [%r3], %r2, %p1, 0; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::2.kind::tf32.collector::a::use [%r1], [%r4], %rd2, [%r3], %r2, %p1, 0; +; CHECK-NEXT: ret; + call void @llvm.nvvm.tcgen05.mma.sp.shared.tf32.scale_d(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, i64 0, i32 1, i32 0) + + call void @llvm.nvvm.tcgen05.mma.sp.shared.tf32.scale_d(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, i64 0, i32 2, i32 0) + + call void @llvm.nvvm.tcgen05.mma.sp.tensor.tf32.scale_d(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, i64 0, i32 1, i32 0) + + call void @llvm.nvvm.tcgen05.mma.sp.tensor.tf32.scale_d(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, i64 0, i32 2, i32 0) + + call void @llvm.nvvm.tcgen05.mma.sp.tensor.tf32.scale_d.ashift(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, i64 0, i32 1, i32 0) + + call void @llvm.nvvm.tcgen05.mma.sp.tensor.tf32.scale_d.ashift(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, i64 0, i32 1, i32 1) + + call void @llvm.nvvm.tcgen05.mma.sp.shared.tf32.scale_d(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, i64 0, i32 1, i32 1) + + call void @llvm.nvvm.tcgen05.mma.sp.tensor.tf32.scale_d.ashift(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, i64 0, i32 2, i32 0) + + call void @llvm.nvvm.tcgen05.mma.sp.tensor.tf32.scale_d.ashift(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, i64 0, i32 2, i32 1) + + call void @llvm.nvvm.tcgen05.mma.sp.shared.tf32.scale_d(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, i64 0, i32 2, i32 1) + + call void @llvm.nvvm.tcgen05.mma.sp.tensor.tf32.scale_d(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, i64 0, i32 1, i32 1) + + call void @llvm.nvvm.tcgen05.mma.sp.tensor.tf32.scale_d(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, i64 0, i32 2, i32 1) + + call void @llvm.nvvm.tcgen05.mma.sp.shared.tf32.scale_d(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, i64 0, i32 1, i32 2) + + call void @llvm.nvvm.tcgen05.mma.sp.shared.tf32.scale_d(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, i64 0, i32 2, i32 2) + + call void @llvm.nvvm.tcgen05.mma.sp.tensor.tf32.scale_d(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, i64 0, i32 1, i32 2) + + call void 
@llvm.nvvm.tcgen05.mma.sp.tensor.tf32.scale_d(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, i64 0, i32 2, i32 2) + + call void @llvm.nvvm.tcgen05.mma.sp.shared.tf32.scale_d(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, i64 0, i32 1, i32 3) + + call void @llvm.nvvm.tcgen05.mma.sp.shared.tf32.scale_d(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, i64 0, i32 2, i32 3) + + call void @llvm.nvvm.tcgen05.mma.sp.tensor.tf32.scale_d(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, i64 0, i32 1, i32 3) + + call void @llvm.nvvm.tcgen05.mma.sp.tensor.tf32.scale_d(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, i64 0, i32 2, i32 3) + ret void +} + +define void @tcgen05_mma_fp16_shared_scale_d_disable_output_lane(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, <4 x i32> %disable_output_lanev4, <8 x i32> %disable_output_lanev8) { +; CHECK-LABEL: tcgen05_mma_fp16_shared_scale_d_disable_output_lane( +; CHECK: { +; CHECK-NEXT: .reg .pred %p<2>; +; CHECK-NEXT: .reg .b16 %rs<3>; +; CHECK-NEXT: .reg .b32 %r<16>; +; CHECK-NEXT: .reg .b64 %rd<3>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.b8 %rs1, [tcgen05_mma_fp16_shared_scale_d_disable_output_lane_param_5]; +; CHECK-NEXT: and.b16 %rs2, %rs1, 1; +; CHECK-NEXT: setp.ne.b16 %p1, %rs2, 0; +; CHECK-NEXT: ld.param.b32 %r1, [tcgen05_mma_fp16_shared_scale_d_disable_output_lane_param_0]; +; CHECK-NEXT: ld.param.b64 %rd1, [tcgen05_mma_fp16_shared_scale_d_disable_output_lane_param_2]; +; CHECK-NEXT: ld.param.b64 %rd2, [tcgen05_mma_fp16_shared_scale_d_disable_output_lane_param_3]; +; CHECK-NEXT: ld.param.b32 %r2, [tcgen05_mma_fp16_shared_scale_d_disable_output_lane_param_4]; +; CHECK-NEXT: ld.param.v4.b32 {%r3, %r4, %r5, %r6}, [tcgen05_mma_fp16_shared_scale_d_disable_output_lane_param_6]; +; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::f16.collector::a::discard [%r1], %rd1, %rd2, %r2, {%r3, %r4, %r5, %r6}, %p1, 0; +; CHECK-NEXT: ld.param.b32 %r7, [tcgen05_mma_fp16_shared_scale_d_disable_output_lane_param_1]; +; CHECK-NEXT: ld.param.v4.b32 {%r8, %r9, %r10, %r11}, [tcgen05_mma_fp16_shared_scale_d_disable_output_lane_param_7+16]; +; CHECK-NEXT: ld.param.v4.b32 {%r12, %r13, %r14, %r15}, [tcgen05_mma_fp16_shared_scale_d_disable_output_lane_param_7]; +; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::f16.collector::a::discard [%r1], %rd1, %rd2, %r2, {%r12, %r13, %r14, %r15, %r8, %r9, %r10, %r11}, %p1, 0; +; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::f16.collector::a::discard [%r1], [%r7], %rd2, %r2, {%r3, %r4, %r5, %r6}, %p1, 0; +; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::f16.collector::a::discard [%r1], [%r7], %rd2, %r2, {%r12, %r13, %r14, %r15, %r8, %r9, %r10, %r11}, %p1, 0; +; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::f16.ashift.collector::a::discard [%r1], [%r7], %rd2, %r2, {%r3, %r4, %r5, %r6}, %p1, 0; +; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::f16.ashift.collector::a::discard [%r1], [%r7], %rd2, %r2, {%r12, %r13, %r14, %r15, %r8, %r9, %r10, %r11}, %p1, 0; +; CHECK-NEXT: ret; + call void @llvm.nvvm.tcgen05.mma.shared.f16.scale_d.disable_output_lane.cg1(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, i64 0, <4 x i32> 
%disable_output_lanev4, i32 0) + + call void @llvm.nvvm.tcgen05.mma.shared.f16.scale_d.disable_output_lane.cg2(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, i64 0, <8 x i32> %disable_output_lanev8, i32 0) + + call void @llvm.nvvm.tcgen05.mma.tensor.f16.scale_d.disable_output_lane.cg1(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, i64 0, <4 x i32> %disable_output_lanev4, i32 0) + + call void @llvm.nvvm.tcgen05.mma.tensor.f16.scale_d.disable_output_lane.cg2(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, i64 0, <8 x i32> %disable_output_lanev8, i32 0) + + call void @llvm.nvvm.tcgen05.mma.tensor.f16.scale_d.disable_output_lane.cg1.ashift(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, i64 0, <4 x i32> %disable_output_lanev4, i32 0) + + call void @llvm.nvvm.tcgen05.mma.tensor.f16.scale_d.disable_output_lane.cg2.ashift(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, i64 0, <8 x i32> %disable_output_lanev8, i32 0) + + + ret void +} + +define void @tcgen05_mma_tf32_shared_scale_d_disable_output_lane(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, <4 x i32> %disable_output_lanev4, <8 x i32> %disable_output_lanev8) { +; CHECK-LABEL: tcgen05_mma_tf32_shared_scale_d_disable_output_lane( +; CHECK: { +; CHECK-NEXT: .reg .pred %p<2>; +; CHECK-NEXT: .reg .b16 %rs<3>; +; CHECK-NEXT: .reg .b32 %r<16>; +; CHECK-NEXT: .reg .b64 %rd<3>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.b8 %rs1, [tcgen05_mma_tf32_shared_scale_d_disable_output_lane_param_5]; +; CHECK-NEXT: and.b16 %rs2, %rs1, 1; +; CHECK-NEXT: setp.ne.b16 %p1, %rs2, 0; +; CHECK-NEXT: ld.param.b32 %r1, [tcgen05_mma_tf32_shared_scale_d_disable_output_lane_param_0]; +; CHECK-NEXT: ld.param.b64 %rd1, [tcgen05_mma_tf32_shared_scale_d_disable_output_lane_param_2]; +; CHECK-NEXT: ld.param.b64 %rd2, [tcgen05_mma_tf32_shared_scale_d_disable_output_lane_param_3]; +; CHECK-NEXT: ld.param.b32 %r2, [tcgen05_mma_tf32_shared_scale_d_disable_output_lane_param_4]; +; CHECK-NEXT: ld.param.v4.b32 {%r3, %r4, %r5, %r6}, [tcgen05_mma_tf32_shared_scale_d_disable_output_lane_param_6]; +; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::tf32.collector::a::discard [%r1], %rd1, %rd2, %r2, {%r3, %r4, %r5, %r6}, %p1, 0; +; CHECK-NEXT: ld.param.b32 %r7, [tcgen05_mma_tf32_shared_scale_d_disable_output_lane_param_1]; +; CHECK-NEXT: ld.param.v4.b32 {%r8, %r9, %r10, %r11}, [tcgen05_mma_tf32_shared_scale_d_disable_output_lane_param_7+16]; +; CHECK-NEXT: ld.param.v4.b32 {%r12, %r13, %r14, %r15}, [tcgen05_mma_tf32_shared_scale_d_disable_output_lane_param_7]; +; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::tf32.collector::a::discard [%r1], %rd1, %rd2, %r2, {%r12, %r13, %r14, %r15, %r8, %r9, %r10, %r11}, %p1, 0; +; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::tf32.collector::a::discard [%r1], [%r7], %rd2, %r2, {%r3, %r4, %r5, %r6}, %p1, 0; +; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::tf32.collector::a::discard [%r1], [%r7], %rd2, %r2, {%r12, %r13, %r14, %r15, %r8, %r9, %r10, %r11}, %p1, 0; +; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::tf32.ashift.collector::a::discard [%r1], [%r7], %rd2, %r2, {%r3, %r4, %r5, %r6}, %p1, 0; +; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::tf32.ashift.collector::a::discard [%r1], [%r7], %rd2, %r2, {%r12, %r13, %r14, %r15, %r8, %r9, %r10, %r11}, %p1, 0; +; CHECK-NEXT: ret; + call void 
@llvm.nvvm.tcgen05.mma.shared.tf32.scale_d.disable_output_lane.cg1(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, i64 0, <4 x i32> %disable_output_lanev4, i32 0) + + call void @llvm.nvvm.tcgen05.mma.shared.tf32.scale_d.disable_output_lane.cg2(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, i64 0, <8 x i32> %disable_output_lanev8, i32 0) + + call void @llvm.nvvm.tcgen05.mma.tensor.tf32.scale_d.disable_output_lane.cg1(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, i64 0, <4 x i32> %disable_output_lanev4, i32 0) + + call void @llvm.nvvm.tcgen05.mma.tensor.tf32.scale_d.disable_output_lane.cg2(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, i64 0, <8 x i32> %disable_output_lanev8, i32 0) + + call void @llvm.nvvm.tcgen05.mma.tensor.tf32.scale_d.disable_output_lane.cg1.ashift(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, i64 0, <4 x i32> %disable_output_lanev4, i32 0) + + call void @llvm.nvvm.tcgen05.mma.tensor.tf32.scale_d.disable_output_lane.cg2.ashift(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, i64 0, <8 x i32> %disable_output_lanev8, i32 0) + + ret void +} diff --git a/llvm/test/CodeGen/NVPTX/tcgen05-mma-ws.ll b/llvm/test/CodeGen/NVPTX/tcgen05-mma-ws.ll new file mode 100644 index 0000000000000..f12d8fb10aab3 --- /dev/null +++ b/llvm/test/CodeGen/NVPTX/tcgen05-mma-ws.ll @@ -0,0 +1,569 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 +; RUN: llc < %s -o - -mcpu=sm_100a -march=nvptx64 -mattr=+ptx86 | FileCheck %s +; RUN: llc < %s -o - -mcpu=sm_101a -march=nvptx64 -mattr=+ptx86 | FileCheck %s +; RUN: %if ptxas-12.8 %{ llc < %s -march=nvptx64 -mattr=+ptx86 -mcpu=sm_100a | %ptxas-verify -arch=sm_100a %} +; RUN: %if ptxas-12.8 %{ llc < %s -march=nvptx64 -mattr=+ptx86 -mcpu=sm_101a | %ptxas-verify -arch=sm_101a %} + +define void @tcgen05_mma_ws_fp16(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d) { +; CHECK-LABEL: tcgen05_mma_ws_fp16( +; CHECK: { +; CHECK-NEXT: .reg .pred %p<2>; +; CHECK-NEXT: .reg .b16 %rs<3>; +; CHECK-NEXT: .reg .b32 %r<4>; +; CHECK-NEXT: .reg .b64 %rd<3>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.b8 %rs1, [tcgen05_mma_ws_fp16_param_5]; +; CHECK-NEXT: and.b16 %rs2, %rs1, 1; +; CHECK-NEXT: setp.ne.b16 %p1, %rs2, 0; +; CHECK-NEXT: ld.param.b32 %r1, [tcgen05_mma_ws_fp16_param_0]; +; CHECK-NEXT: ld.param.b64 %rd1, [tcgen05_mma_ws_fp16_param_2]; +; CHECK-NEXT: ld.param.b64 %rd2, [tcgen05_mma_ws_fp16_param_3]; +; CHECK-NEXT: ld.param.b32 %r2, [tcgen05_mma_ws_fp16_param_4]; +; CHECK-NEXT: tcgen05.mma.ws.cta_group::1.kind::f16.collector::b0::discard [%r1], %rd1, %rd2, %r2, %p1; +; CHECK-NEXT: ld.param.b32 %r3, [tcgen05_mma_ws_fp16_param_1]; +; CHECK-NEXT: tcgen05.mma.ws.cta_group::1.kind::f16.collector::b0::discard [%r1], %rd1, %rd2, %r2, %p1; +; CHECK-NEXT: tcgen05.mma.ws.cta_group::1.kind::f16.collector::b0::discard [%r1], [%r3], %rd2, %r2, %p1; +; CHECK-NEXT: tcgen05.mma.ws.cta_group::1.kind::f16.collector::b0::discard [%r1], [%r3], %rd2, %r2, %p1; +; CHECK-NEXT: tcgen05.mma.ws.cta_group::1.kind::f16.collector::b0::discard [%r1], [%r3], %rd2, %r2, %p1; +; CHECK-NEXT: tcgen05.mma.ws.cta_group::1.kind::f16.collector::b0::lastuse [%r1], %rd1, %rd2, %r2, %p1; +; CHECK-NEXT: 
tcgen05.mma.ws.cta_group::1.kind::f16.collector::b0::discard [%r1], [%r3], %rd2, %r2, %p1; +; CHECK-NEXT: tcgen05.mma.ws.cta_group::1.kind::f16.collector::b0::lastuse [%r1], %rd1, %rd2, %r2, %p1; +; CHECK-NEXT: tcgen05.mma.ws.cta_group::1.kind::f16.collector::b0::lastuse [%r1], [%r3], %rd2, %r2, %p1; +; CHECK-NEXT: tcgen05.mma.ws.cta_group::1.kind::f16.collector::b0::fill [%r1], [%r3], %rd2, %r2, %p1; +; CHECK-NEXT: tcgen05.mma.ws.cta_group::1.kind::f16.collector::b0::fill [%r1], %rd1, %rd2, %r2, %p1; +; CHECK-NEXT: tcgen05.mma.ws.cta_group::1.kind::f16.collector::b0::fill [%r1], %rd1, %rd2, %r2, %p1; +; CHECK-NEXT: tcgen05.mma.ws.cta_group::1.kind::f16.collector::b0::fill [%r1], [%r3], %rd2, %r2, %p1; +; CHECK-NEXT: tcgen05.mma.ws.cta_group::1.kind::f16.collector::b0::fill [%r1], [%r3], %rd2, %r2, %p1; +; CHECK-NEXT: tcgen05.mma.ws.cta_group::1.kind::f16.collector::b0::use [%r1], %rd1, %rd2, %r2, %p1; +; CHECK-NEXT: tcgen05.mma.ws.cta_group::1.kind::f16.collector::b0::use [%r1], %rd1, %rd2, %r2, %p1; +; CHECK-NEXT: tcgen05.mma.ws.cta_group::1.kind::f16.collector::b0::use [%r1], [%r3], %rd2, %r2, %p1; +; CHECK-NEXT: tcgen05.mma.ws.cta_group::1.kind::f16.collector::b0::use [%r1], [%r3], %rd2, %r2, %p1; +; CHECK-NEXT: tcgen05.mma.ws.cta_group::1.kind::f16.collector::b0::use [%r1], [%r3], %rd2, %r2, %p1; +; CHECK-NEXT: tcgen05.mma.ws.cta_group::1.kind::f16.collector::b0::use [%r1], [%r3], %rd2, %r2, %p1; +; CHECK-NEXT: ret; + call void @llvm.nvvm.tcgen05.mma.ws.shared(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 0, i32 0, i32 0) + + call void @llvm.nvvm.tcgen05.mma.ws.shared(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 0, i32 0, i32 0) + + call void @llvm.nvvm.tcgen05.mma.ws.tensor(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 0, i32 0, i32 0) + + call void @llvm.nvvm.tcgen05.mma.ws.tensor(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 0, i32 0, i32 0) + + call void @llvm.nvvm.tcgen05.mma.ws.tensor(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 0, i32 0, i32 0) + + call void @llvm.nvvm.tcgen05.mma.ws.shared(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 0, i32 0, i32 1) + + call void @llvm.nvvm.tcgen05.mma.ws.tensor(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 0, i32 0, i32 0) + + call void @llvm.nvvm.tcgen05.mma.ws.shared(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 0, i32 0, i32 1) + + call void @llvm.nvvm.tcgen05.mma.ws.tensor(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 0, i32 0, i32 1) + + call void @llvm.nvvm.tcgen05.mma.ws.tensor(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 0, i32 0, i32 2) + + call void @llvm.nvvm.tcgen05.mma.ws.shared(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 0, i32 0, i32 2) + + call void @llvm.nvvm.tcgen05.mma.ws.shared(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 0, i32 0, i32 2) + + call void @llvm.nvvm.tcgen05.mma.ws.tensor(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 0, i32 0, i32 2) + + call void @llvm.nvvm.tcgen05.mma.ws.tensor(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 
%enable_inp_d, i32 0, i32 0, i32 2) + + call void @llvm.nvvm.tcgen05.mma.ws.shared(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 0, i32 0, i32 3) + + call void @llvm.nvvm.tcgen05.mma.ws.shared(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 0, i32 0, i32 3) + + call void @llvm.nvvm.tcgen05.mma.ws.tensor(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 0, i32 0, i32 3) + + call void @llvm.nvvm.tcgen05.mma.ws.tensor(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 0, i32 0, i32 3) + + call void @llvm.nvvm.tcgen05.mma.ws.tensor(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 0, i32 0, i32 3) + + call void @llvm.nvvm.tcgen05.mma.ws.tensor(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 0, i32 0, i32 3) + ret void +} + +define void @tcgen05_mma_ws_fp16_zero_col_mask(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, i64 %zero_col_mask) { +; CHECK-LABEL: tcgen05_mma_ws_fp16_zero_col_mask( +; CHECK: { +; CHECK-NEXT: .reg .pred %p<2>; +; CHECK-NEXT: .reg .b16 %rs<3>; +; CHECK-NEXT: .reg .b32 %r<4>; +; CHECK-NEXT: .reg .b64 %rd<4>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.b8 %rs1, [tcgen05_mma_ws_fp16_zero_col_mask_param_5]; +; CHECK-NEXT: and.b16 %rs2, %rs1, 1; +; CHECK-NEXT: setp.ne.b16 %p1, %rs2, 0; +; CHECK-NEXT: ld.param.b32 %r1, [tcgen05_mma_ws_fp16_zero_col_mask_param_0]; +; CHECK-NEXT: ld.param.b64 %rd1, [tcgen05_mma_ws_fp16_zero_col_mask_param_2]; +; CHECK-NEXT: ld.param.b64 %rd2, [tcgen05_mma_ws_fp16_zero_col_mask_param_3]; +; CHECK-NEXT: ld.param.b32 %r2, [tcgen05_mma_ws_fp16_zero_col_mask_param_4]; +; CHECK-NEXT: ld.param.b64 %rd3, [tcgen05_mma_ws_fp16_zero_col_mask_param_6]; +; CHECK-NEXT: tcgen05.mma.ws.cta_group::1.kind::f16.collector::b0::discard [%r1], %rd1, %rd2, %r2, %p1, %rd3; +; CHECK-NEXT: ld.param.b32 %r3, [tcgen05_mma_ws_fp16_zero_col_mask_param_1]; +; CHECK-NEXT: tcgen05.mma.ws.cta_group::1.kind::f16.collector::b0::discard [%r1], %rd1, %rd2, %r2, %p1, %rd3; +; CHECK-NEXT: tcgen05.mma.ws.cta_group::1.kind::f16.collector::b0::discard [%r1], [%r3], %rd2, %r2, %p1, %rd3; +; CHECK-NEXT: tcgen05.mma.ws.cta_group::1.kind::f16.collector::b0::discard [%r1], [%r3], %rd2, %r2, %p1, %rd3; +; CHECK-NEXT: tcgen05.mma.ws.cta_group::1.kind::f16.collector::b0::discard [%r1], [%r3], %rd2, %r2, %p1, %rd3; +; CHECK-NEXT: tcgen05.mma.ws.cta_group::1.kind::f16.collector::b0::lastuse [%r1], %rd1, %rd2, %r2, %p1, %rd3; +; CHECK-NEXT: tcgen05.mma.ws.cta_group::1.kind::f16.collector::b0::discard [%r1], [%r3], %rd2, %r2, %p1, %rd3; +; CHECK-NEXT: tcgen05.mma.ws.cta_group::1.kind::f16.collector::b0::lastuse [%r1], %rd1, %rd2, %r2, %p1, %rd3; +; CHECK-NEXT: tcgen05.mma.ws.cta_group::1.kind::f16.collector::b0::lastuse [%r1], [%r3], %rd2, %r2, %p1, %rd3; +; CHECK-NEXT: tcgen05.mma.ws.cta_group::1.kind::f16.collector::b0::fill [%r1], [%r3], %rd2, %r2, %p1, %rd3; +; CHECK-NEXT: tcgen05.mma.ws.cta_group::1.kind::f16.collector::b0::fill [%r1], %rd1, %rd2, %r2, %p1, %rd3; +; CHECK-NEXT: tcgen05.mma.ws.cta_group::1.kind::f16.collector::b0::fill [%r1], %rd1, %rd2, %r2, %p1, %rd3; +; CHECK-NEXT: tcgen05.mma.ws.cta_group::1.kind::f16.collector::b0::fill [%r1], [%r3], %rd2, %r2, %p1, %rd3; +; CHECK-NEXT: tcgen05.mma.ws.cta_group::1.kind::f16.collector::b0::fill [%r1], [%r3], %rd2, %r2, %p1, 
%rd3; +; CHECK-NEXT: tcgen05.mma.ws.cta_group::1.kind::f16.collector::b0::use [%r1], %rd1, %rd2, %r2, %p1, %rd3; +; CHECK-NEXT: tcgen05.mma.ws.cta_group::1.kind::f16.collector::b0::use [%r1], %rd1, %rd2, %r2, %p1, %rd3; +; CHECK-NEXT: tcgen05.mma.ws.cta_group::1.kind::f16.collector::b0::use [%r1], [%r3], %rd2, %r2, %p1, %rd3; +; CHECK-NEXT: tcgen05.mma.ws.cta_group::1.kind::f16.collector::b0::use [%r1], [%r3], %rd2, %r2, %p1, %rd3; +; CHECK-NEXT: tcgen05.mma.ws.cta_group::1.kind::f16.collector::b0::use [%r1], [%r3], %rd2, %r2, %p1, %rd3; +; CHECK-NEXT: tcgen05.mma.ws.cta_group::1.kind::f16.collector::b0::use [%r1], [%r3], %rd2, %r2, %p1, %rd3; +; CHECK-NEXT: ret; + call void @llvm.nvvm.tcgen05.mma.ws.shared.zero_col_mask(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, i64 %zero_col_mask, i32 0, i32 0, i32 0) + + call void @llvm.nvvm.tcgen05.mma.ws.shared.zero_col_mask(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, i64 %zero_col_mask, i32 0, i32 0, i32 0) + + call void @llvm.nvvm.tcgen05.mma.ws.tensor.zero_col_mask(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, i64 %zero_col_mask, i32 0, i32 0, i32 0) + + call void @llvm.nvvm.tcgen05.mma.ws.tensor.zero_col_mask(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, i64 %zero_col_mask, i32 0, i32 0, i32 0) + + call void @llvm.nvvm.tcgen05.mma.ws.tensor.zero_col_mask(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, i64 %zero_col_mask, i32 0, i32 0, i32 0) + + call void @llvm.nvvm.tcgen05.mma.ws.shared.zero_col_mask(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, i64 %zero_col_mask, i32 0, i32 0, i32 1) + + call void @llvm.nvvm.tcgen05.mma.ws.tensor.zero_col_mask(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, i64 %zero_col_mask, i32 0, i32 0, i32 0) + + call void @llvm.nvvm.tcgen05.mma.ws.shared.zero_col_mask(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, i64 %zero_col_mask, i32 0, i32 0, i32 1) + + call void @llvm.nvvm.tcgen05.mma.ws.tensor.zero_col_mask(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, i64 %zero_col_mask, i32 0, i32 0, i32 1) + + call void @llvm.nvvm.tcgen05.mma.ws.tensor.zero_col_mask(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, i64 %zero_col_mask, i32 0, i32 0, i32 2) + + call void @llvm.nvvm.tcgen05.mma.ws.shared.zero_col_mask(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, i64 %zero_col_mask, i32 0, i32 0, i32 2) + + call void @llvm.nvvm.tcgen05.mma.ws.shared.zero_col_mask(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, i64 %zero_col_mask, i32 0, i32 0, i32 2) + + call void @llvm.nvvm.tcgen05.mma.ws.tensor.zero_col_mask(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, i64 %zero_col_mask, i32 0, i32 0, i32 2) + + call void @llvm.nvvm.tcgen05.mma.ws.tensor.zero_col_mask(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, i64 %zero_col_mask, i32 0, i32 0, i32 2) + + call void @llvm.nvvm.tcgen05.mma.ws.shared.zero_col_mask(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, i64 %zero_col_mask, i32 0, i32 0, i32 3) + + call void @llvm.nvvm.tcgen05.mma.ws.shared.zero_col_mask(ptr addrspace(6) %dtmem, i64 
%ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, i64 %zero_col_mask, i32 0, i32 0, i32 3) + + call void @llvm.nvvm.tcgen05.mma.ws.tensor.zero_col_mask(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, i64 %zero_col_mask, i32 0, i32 0, i32 3) + + call void @llvm.nvvm.tcgen05.mma.ws.tensor.zero_col_mask(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, i64 %zero_col_mask, i32 0, i32 0, i32 3) + + call void @llvm.nvvm.tcgen05.mma.ws.tensor.zero_col_mask(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, i64 %zero_col_mask, i32 0, i32 0, i32 3) + + call void @llvm.nvvm.tcgen05.mma.ws.tensor.zero_col_mask(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, i64 %zero_col_mask, i32 0, i32 0, i32 3) + ret void +} + +define void @tcgen05_mma_ws_sp_fp16(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmeta) { +; CHECK-LABEL: tcgen05_mma_ws_sp_fp16( +; CHECK: { +; CHECK-NEXT: .reg .pred %p<2>; +; CHECK-NEXT: .reg .b16 %rs<3>; +; CHECK-NEXT: .reg .b32 %r<5>; +; CHECK-NEXT: .reg .b64 %rd<3>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.b8 %rs1, [tcgen05_mma_ws_sp_fp16_param_5]; +; CHECK-NEXT: and.b16 %rs2, %rs1, 1; +; CHECK-NEXT: setp.ne.b16 %p1, %rs2, 0; +; CHECK-NEXT: ld.param.b32 %r1, [tcgen05_mma_ws_sp_fp16_param_0]; +; CHECK-NEXT: ld.param.b64 %rd1, [tcgen05_mma_ws_sp_fp16_param_2]; +; CHECK-NEXT: ld.param.b64 %rd2, [tcgen05_mma_ws_sp_fp16_param_3]; +; CHECK-NEXT: ld.param.b32 %r2, [tcgen05_mma_ws_sp_fp16_param_4]; +; CHECK-NEXT: ld.param.b32 %r3, [tcgen05_mma_ws_sp_fp16_param_6]; +; CHECK-NEXT: tcgen05.mma.ws.sp.cta_group::1.kind::f16.collector::b0::discard [%r1], %rd1, %rd2, [%r3], %r2, %p1; +; CHECK-NEXT: ld.param.b32 %r4, [tcgen05_mma_ws_sp_fp16_param_1]; +; CHECK-NEXT: tcgen05.mma.ws.sp.cta_group::1.kind::f16.collector::b0::discard [%r1], %rd1, %rd2, [%r3], %r2, %p1; +; CHECK-NEXT: tcgen05.mma.ws.sp.cta_group::1.kind::f16.collector::b0::discard [%r1], [%r4], %rd2, [%r3], %r2, %p1; +; CHECK-NEXT: tcgen05.mma.ws.sp.cta_group::1.kind::f16.collector::b0::discard [%r1], [%r4], %rd2, [%r3], %r2, %p1; +; CHECK-NEXT: tcgen05.mma.ws.sp.cta_group::1.kind::f16.collector::b0::discard [%r1], [%r4], %rd2, [%r3], %r2, %p1; +; CHECK-NEXT: tcgen05.mma.ws.sp.cta_group::1.kind::f16.collector::b0::lastuse [%r1], %rd1, %rd2, [%r3], %r2, %p1; +; CHECK-NEXT: tcgen05.mma.ws.sp.cta_group::1.kind::f16.collector::b0::discard [%r1], [%r4], %rd2, [%r3], %r2, %p1; +; CHECK-NEXT: tcgen05.mma.ws.sp.cta_group::1.kind::f16.collector::b0::lastuse [%r1], %rd1, %rd2, [%r3], %r2, %p1; +; CHECK-NEXT: tcgen05.mma.ws.sp.cta_group::1.kind::f16.collector::b0::lastuse [%r1], [%r4], %rd2, [%r3], %r2, %p1; +; CHECK-NEXT: tcgen05.mma.ws.sp.cta_group::1.kind::f16.collector::b0::fill [%r1], [%r4], %rd2, [%r3], %r2, %p1; +; CHECK-NEXT: tcgen05.mma.ws.sp.cta_group::1.kind::f16.collector::b0::fill [%r1], %rd1, %rd2, [%r3], %r2, %p1; +; CHECK-NEXT: tcgen05.mma.ws.sp.cta_group::1.kind::f16.collector::b0::fill [%r1], %rd1, %rd2, [%r3], %r2, %p1; +; CHECK-NEXT: tcgen05.mma.ws.sp.cta_group::1.kind::f16.collector::b0::fill [%r1], [%r4], %rd2, [%r3], %r2, %p1; +; CHECK-NEXT: tcgen05.mma.ws.sp.cta_group::1.kind::f16.collector::b0::fill [%r1], [%r4], %rd2, [%r3], %r2, %p1; +; CHECK-NEXT: tcgen05.mma.ws.sp.cta_group::1.kind::f16.collector::b0::use [%r1], %rd1, %rd2, [%r3], %r2, %p1; +; CHECK-NEXT: 
tcgen05.mma.ws.sp.cta_group::1.kind::f16.collector::b0::use [%r1], %rd1, %rd2, [%r3], %r2, %p1; +; CHECK-NEXT: tcgen05.mma.ws.sp.cta_group::1.kind::f16.collector::b0::use [%r1], [%r4], %rd2, [%r3], %r2, %p1; +; CHECK-NEXT: tcgen05.mma.ws.sp.cta_group::1.kind::f16.collector::b0::use [%r1], [%r4], %rd2, [%r3], %r2, %p1; +; CHECK-NEXT: tcgen05.mma.ws.sp.cta_group::1.kind::f16.collector::b0::use [%r1], [%r4], %rd2, [%r3], %r2, %p1; +; CHECK-NEXT: tcgen05.mma.ws.sp.cta_group::1.kind::f16.collector::b0::use [%r1], [%r4], %rd2, [%r3], %r2, %p1; +; CHECK-NEXT: ret; + call void @llvm.nvvm.tcgen05.mma.ws.sp.shared(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmeta, i32 0, i32 0, i32 0) + + call void @llvm.nvvm.tcgen05.mma.ws.sp.shared(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmeta, i32 0, i32 0, i32 0) + + call void @llvm.nvvm.tcgen05.mma.ws.sp.tensor(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmeta, i32 0, i32 0, i32 0) + + call void @llvm.nvvm.tcgen05.mma.ws.sp.tensor(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmeta, i32 0, i32 0, i32 0) + + call void @llvm.nvvm.tcgen05.mma.ws.sp.tensor(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmeta, i32 0, i32 0, i32 0) + + call void @llvm.nvvm.tcgen05.mma.ws.sp.shared(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmeta, i32 0, i32 0, i32 1) + + call void @llvm.nvvm.tcgen05.mma.ws.sp.tensor(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmeta, i32 0, i32 0, i32 0) + + call void @llvm.nvvm.tcgen05.mma.ws.sp.shared(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmeta, i32 0, i32 0, i32 1) + + call void @llvm.nvvm.tcgen05.mma.ws.sp.tensor(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmeta, i32 0, i32 0, i32 1) + + call void @llvm.nvvm.tcgen05.mma.ws.sp.tensor(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmeta, i32 0, i32 0, i32 2) + + call void @llvm.nvvm.tcgen05.mma.ws.sp.shared(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmeta, i32 0, i32 0, i32 2) + + call void @llvm.nvvm.tcgen05.mma.ws.sp.shared(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmeta, i32 0, i32 0, i32 2) + + call void @llvm.nvvm.tcgen05.mma.ws.sp.tensor(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmeta, i32 0, i32 0, i32 2) + + call void @llvm.nvvm.tcgen05.mma.ws.sp.tensor(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmeta, i32 0, i32 0, i32 2) + + call void @llvm.nvvm.tcgen05.mma.ws.sp.shared(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmeta, i32 0, i32 0, i32 3) + + call void @llvm.nvvm.tcgen05.mma.ws.sp.shared(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmeta, i32 0, i32 0, i32 3) + + call void @llvm.nvvm.tcgen05.mma.ws.sp.tensor(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, 
i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmeta, i32 0, i32 0, i32 3) + + call void @llvm.nvvm.tcgen05.mma.ws.sp.tensor(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmeta, i32 0, i32 0, i32 3) + + call void @llvm.nvvm.tcgen05.mma.ws.sp.tensor(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmeta, i32 0, i32 0, i32 3) + + call void @llvm.nvvm.tcgen05.mma.ws.sp.tensor(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmeta, i32 0, i32 0, i32 3) + ret void +} + +define void @tcgen05_mma_ws_sp_fp16_zero_col_mask(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, i64 %zero_col_mask, ptr addrspace(6) %spmeta) { +; CHECK-LABEL: tcgen05_mma_ws_sp_fp16_zero_col_mask( +; CHECK: { +; CHECK-NEXT: .reg .pred %p<2>; +; CHECK-NEXT: .reg .b16 %rs<3>; +; CHECK-NEXT: .reg .b32 %r<5>; +; CHECK-NEXT: .reg .b64 %rd<4>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.b8 %rs1, [tcgen05_mma_ws_sp_fp16_zero_col_mask_param_5]; +; CHECK-NEXT: and.b16 %rs2, %rs1, 1; +; CHECK-NEXT: setp.ne.b16 %p1, %rs2, 0; +; CHECK-NEXT: ld.param.b32 %r1, [tcgen05_mma_ws_sp_fp16_zero_col_mask_param_0]; +; CHECK-NEXT: ld.param.b64 %rd1, [tcgen05_mma_ws_sp_fp16_zero_col_mask_param_2]; +; CHECK-NEXT: ld.param.b64 %rd2, [tcgen05_mma_ws_sp_fp16_zero_col_mask_param_3]; +; CHECK-NEXT: ld.param.b32 %r2, [tcgen05_mma_ws_sp_fp16_zero_col_mask_param_4]; +; CHECK-NEXT: ld.param.b64 %rd3, [tcgen05_mma_ws_sp_fp16_zero_col_mask_param_6]; +; CHECK-NEXT: ld.param.b32 %r3, [tcgen05_mma_ws_sp_fp16_zero_col_mask_param_7]; +; CHECK-NEXT: tcgen05.mma.ws.sp.cta_group::1.kind::f16.collector::b0::discard [%r1], %rd1, %rd2, [%r3], %r2, %p1, %rd3; +; CHECK-NEXT: ld.param.b32 %r4, [tcgen05_mma_ws_sp_fp16_zero_col_mask_param_1]; +; CHECK-NEXT: tcgen05.mma.ws.sp.cta_group::1.kind::f16.collector::b0::discard [%r1], %rd1, %rd2, [%r3], %r2, %p1, %rd3; +; CHECK-NEXT: tcgen05.mma.ws.sp.cta_group::1.kind::f16.collector::b0::discard [%r1], [%r4], %rd2, [%r3], %r2, %p1, %rd3; +; CHECK-NEXT: tcgen05.mma.ws.sp.cta_group::1.kind::f16.collector::b0::discard [%r1], [%r4], %rd2, [%r3], %r2, %p1, %rd3; +; CHECK-NEXT: tcgen05.mma.ws.sp.cta_group::1.kind::f16.collector::b0::discard [%r1], [%r4], %rd2, [%r3], %r2, %p1, %rd3; +; CHECK-NEXT: tcgen05.mma.ws.sp.cta_group::1.kind::f16.collector::b0::lastuse [%r1], %rd1, %rd2, [%r3], %r2, %p1, %rd3; +; CHECK-NEXT: tcgen05.mma.ws.sp.cta_group::1.kind::f16.collector::b0::discard [%r1], [%r4], %rd2, [%r3], %r2, %p1, %rd3; +; CHECK-NEXT: tcgen05.mma.ws.sp.cta_group::1.kind::f16.collector::b0::lastuse [%r1], %rd1, %rd2, [%r3], %r2, %p1, %rd3; +; CHECK-NEXT: tcgen05.mma.ws.sp.cta_group::1.kind::f16.collector::b0::lastuse [%r1], [%r4], %rd2, [%r3], %r2, %p1, %rd3; +; CHECK-NEXT: tcgen05.mma.ws.sp.cta_group::1.kind::f16.collector::b0::fill [%r1], [%r4], %rd2, [%r3], %r2, %p1, %rd3; +; CHECK-NEXT: tcgen05.mma.ws.sp.cta_group::1.kind::f16.collector::b0::fill [%r1], %rd1, %rd2, [%r3], %r2, %p1, %rd3; +; CHECK-NEXT: tcgen05.mma.ws.sp.cta_group::1.kind::f16.collector::b0::fill [%r1], %rd1, %rd2, [%r3], %r2, %p1, %rd3; +; CHECK-NEXT: tcgen05.mma.ws.sp.cta_group::1.kind::f16.collector::b0::fill [%r1], [%r4], %rd2, [%r3], %r2, %p1, %rd3; +; CHECK-NEXT: tcgen05.mma.ws.sp.cta_group::1.kind::f16.collector::b0::fill [%r1], [%r4], %rd2, [%r3], %r2, %p1, %rd3; +; CHECK-NEXT: 
tcgen05.mma.ws.sp.cta_group::1.kind::f16.collector::b0::use [%r1], %rd1, %rd2, [%r3], %r2, %p1, %rd3; +; CHECK-NEXT: tcgen05.mma.ws.sp.cta_group::1.kind::f16.collector::b0::use [%r1], %rd1, %rd2, [%r3], %r2, %p1, %rd3; +; CHECK-NEXT: tcgen05.mma.ws.sp.cta_group::1.kind::f16.collector::b0::use [%r1], [%r4], %rd2, [%r3], %r2, %p1, %rd3; +; CHECK-NEXT: tcgen05.mma.ws.sp.cta_group::1.kind::f16.collector::b0::use [%r1], [%r4], %rd2, [%r3], %r2, %p1, %rd3; +; CHECK-NEXT: tcgen05.mma.ws.sp.cta_group::1.kind::f16.collector::b0::use [%r1], [%r4], %rd2, [%r3], %r2, %p1, %rd3; +; CHECK-NEXT: tcgen05.mma.ws.sp.cta_group::1.kind::f16.collector::b0::use [%r1], [%r4], %rd2, [%r3], %r2, %p1, %rd3; +; CHECK-NEXT: ret; + call void @llvm.nvvm.tcgen05.mma.ws.sp.shared.zero_col_mask(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmeta, i64 %zero_col_mask, i32 0, i32 0, i32 0) + + call void @llvm.nvvm.tcgen05.mma.ws.sp.shared.zero_col_mask(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmeta, i64 %zero_col_mask, i32 0, i32 0, i32 0) + + call void @llvm.nvvm.tcgen05.mma.ws.sp.tensor.zero_col_mask(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmeta, i64 %zero_col_mask, i32 0, i32 0, i32 0) + + call void @llvm.nvvm.tcgen05.mma.ws.sp.tensor.zero_col_mask(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmeta, i64 %zero_col_mask, i32 0, i32 0, i32 0) + + call void @llvm.nvvm.tcgen05.mma.ws.sp.tensor.zero_col_mask(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmeta, i64 %zero_col_mask, i32 0, i32 0, i32 0) + + call void @llvm.nvvm.tcgen05.mma.ws.sp.shared.zero_col_mask(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmeta, i64 %zero_col_mask, i32 0, i32 0, i32 1) + + call void @llvm.nvvm.tcgen05.mma.ws.sp.tensor.zero_col_mask(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmeta, i64 %zero_col_mask, i32 0, i32 0, i32 0) + + call void @llvm.nvvm.tcgen05.mma.ws.sp.shared.zero_col_mask(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmeta, i64 %zero_col_mask, i32 0, i32 0, i32 1) + + call void @llvm.nvvm.tcgen05.mma.ws.sp.tensor.zero_col_mask(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmeta, i64 %zero_col_mask, i32 0, i32 0, i32 1) + + call void @llvm.nvvm.tcgen05.mma.ws.sp.tensor.zero_col_mask(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmeta, i64 %zero_col_mask, i32 0, i32 0, i32 2) + + call void @llvm.nvvm.tcgen05.mma.ws.sp.shared.zero_col_mask(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmeta, i64 %zero_col_mask, i32 0, i32 0, i32 2) + + call void @llvm.nvvm.tcgen05.mma.ws.sp.shared.zero_col_mask(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmeta, i64 %zero_col_mask, i32 0, i32 0, i32 2) + + call void @llvm.nvvm.tcgen05.mma.ws.sp.tensor.zero_col_mask(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmeta, i64 %zero_col_mask, i32 0, i32 0, i32 2) + + call void 
@llvm.nvvm.tcgen05.mma.ws.sp.tensor.zero_col_mask(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmeta, i64 %zero_col_mask, i32 0, i32 0, i32 2) + + call void @llvm.nvvm.tcgen05.mma.ws.sp.shared.zero_col_mask(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmeta, i64 %zero_col_mask, i32 0, i32 0, i32 3) + + call void @llvm.nvvm.tcgen05.mma.ws.sp.shared.zero_col_mask(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmeta, i64 %zero_col_mask, i32 0, i32 0, i32 3) + + call void @llvm.nvvm.tcgen05.mma.ws.sp.tensor.zero_col_mask(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmeta, i64 %zero_col_mask, i32 0, i32 0, i32 3) + + call void @llvm.nvvm.tcgen05.mma.ws.sp.tensor.zero_col_mask(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmeta, i64 %zero_col_mask, i32 0, i32 0, i32 3) + + call void @llvm.nvvm.tcgen05.mma.ws.sp.tensor.zero_col_mask(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmeta, i64 %zero_col_mask, i32 0, i32 0, i32 3) + + call void @llvm.nvvm.tcgen05.mma.ws.sp.tensor.zero_col_mask(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmeta, i64 %zero_col_mask, i32 0, i32 0, i32 3) + ret void +} + +define void @tcgen05_mma_ws_tf32(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d) { +; CHECK-LABEL: tcgen05_mma_ws_tf32( +; CHECK: { +; CHECK-NEXT: .reg .pred %p<2>; +; CHECK-NEXT: .reg .b16 %rs<3>; +; CHECK-NEXT: .reg .b32 %r<4>; +; CHECK-NEXT: .reg .b64 %rd<3>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.b8 %rs1, [tcgen05_mma_ws_tf32_param_5]; +; CHECK-NEXT: and.b16 %rs2, %rs1, 1; +; CHECK-NEXT: setp.ne.b16 %p1, %rs2, 0; +; CHECK-NEXT: ld.param.b32 %r1, [tcgen05_mma_ws_tf32_param_0]; +; CHECK-NEXT: ld.param.b64 %rd1, [tcgen05_mma_ws_tf32_param_2]; +; CHECK-NEXT: ld.param.b64 %rd2, [tcgen05_mma_ws_tf32_param_3]; +; CHECK-NEXT: ld.param.b32 %r2, [tcgen05_mma_ws_tf32_param_4]; +; CHECK-NEXT: tcgen05.mma.ws.cta_group::1.kind::tf32.collector::b0::discard [%r1], %rd1, %rd2, %r2, %p1; +; CHECK-NEXT: ld.param.b32 %r3, [tcgen05_mma_ws_tf32_param_1]; +; CHECK-NEXT: tcgen05.mma.ws.cta_group::1.kind::tf32.collector::b0::discard [%r1], %rd1, %rd2, %r2, %p1; +; CHECK-NEXT: tcgen05.mma.ws.cta_group::1.kind::tf32.collector::b0::discard [%r1], [%r3], %rd2, %r2, %p1; +; CHECK-NEXT: tcgen05.mma.ws.cta_group::1.kind::tf32.collector::b0::discard [%r1], [%r3], %rd2, %r2, %p1; +; CHECK-NEXT: tcgen05.mma.ws.cta_group::1.kind::tf32.collector::b0::discard [%r1], [%r3], %rd2, %r2, %p1; +; CHECK-NEXT: tcgen05.mma.ws.cta_group::1.kind::tf32.collector::b0::lastuse [%r1], %rd1, %rd2, %r2, %p1; +; CHECK-NEXT: tcgen05.mma.ws.cta_group::1.kind::tf32.collector::b0::discard [%r1], [%r3], %rd2, %r2, %p1; +; CHECK-NEXT: tcgen05.mma.ws.cta_group::1.kind::tf32.collector::b0::lastuse [%r1], %rd1, %rd2, %r2, %p1; +; CHECK-NEXT: tcgen05.mma.ws.cta_group::1.kind::tf32.collector::b0::lastuse [%r1], [%r3], %rd2, %r2, %p1; +; CHECK-NEXT: tcgen05.mma.ws.cta_group::1.kind::tf32.collector::b0::fill [%r1], [%r3], %rd2, %r2, %p1; +; CHECK-NEXT: tcgen05.mma.ws.cta_group::1.kind::tf32.collector::b0::fill [%r1], %rd1, %rd2, %r2, %p1; +; CHECK-NEXT: 
tcgen05.mma.ws.cta_group::1.kind::tf32.collector::b0::fill [%r1], %rd1, %rd2, %r2, %p1; +; CHECK-NEXT: tcgen05.mma.ws.cta_group::1.kind::tf32.collector::b0::fill [%r1], [%r3], %rd2, %r2, %p1; +; CHECK-NEXT: tcgen05.mma.ws.cta_group::1.kind::tf32.collector::b0::fill [%r1], [%r3], %rd2, %r2, %p1; +; CHECK-NEXT: tcgen05.mma.ws.cta_group::1.kind::tf32.collector::b0::use [%r1], %rd1, %rd2, %r2, %p1; +; CHECK-NEXT: tcgen05.mma.ws.cta_group::1.kind::tf32.collector::b0::use [%r1], %rd1, %rd2, %r2, %p1; +; CHECK-NEXT: tcgen05.mma.ws.cta_group::1.kind::tf32.collector::b0::use [%r1], [%r3], %rd2, %r2, %p1; +; CHECK-NEXT: tcgen05.mma.ws.cta_group::1.kind::tf32.collector::b0::use [%r1], [%r3], %rd2, %r2, %p1; +; CHECK-NEXT: tcgen05.mma.ws.cta_group::1.kind::tf32.collector::b0::use [%r1], [%r3], %rd2, %r2, %p1; +; CHECK-NEXT: tcgen05.mma.ws.cta_group::1.kind::tf32.collector::b0::use [%r1], [%r3], %rd2, %r2, %p1; +; CHECK-NEXT: ret; + call void @llvm.nvvm.tcgen05.mma.ws.shared(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 1, i32 0, i32 0) + + call void @llvm.nvvm.tcgen05.mma.ws.shared(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 1, i32 0, i32 0) + + call void @llvm.nvvm.tcgen05.mma.ws.tensor(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 1, i32 0, i32 0) + + call void @llvm.nvvm.tcgen05.mma.ws.tensor(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 1, i32 0, i32 0) + + call void @llvm.nvvm.tcgen05.mma.ws.tensor(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 1, i32 0, i32 0) + + call void @llvm.nvvm.tcgen05.mma.ws.shared(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 1, i32 0, i32 1) + + call void @llvm.nvvm.tcgen05.mma.ws.tensor(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 1, i32 0, i32 0) + + call void @llvm.nvvm.tcgen05.mma.ws.shared(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 1, i32 0, i32 1) + + call void @llvm.nvvm.tcgen05.mma.ws.tensor(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 1, i32 0, i32 1) + + call void @llvm.nvvm.tcgen05.mma.ws.tensor(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 1, i32 0, i32 2) + + call void @llvm.nvvm.tcgen05.mma.ws.shared(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 1, i32 0, i32 2) + + call void @llvm.nvvm.tcgen05.mma.ws.shared(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 1, i32 0, i32 2) + + call void @llvm.nvvm.tcgen05.mma.ws.tensor(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 1, i32 0, i32 2) + + call void @llvm.nvvm.tcgen05.mma.ws.tensor(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 1, i32 0, i32 2) + + call void @llvm.nvvm.tcgen05.mma.ws.shared(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 1, i32 0, i32 3) + + call void @llvm.nvvm.tcgen05.mma.ws.shared(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 1, i32 0, i32 3) + + call void @llvm.nvvm.tcgen05.mma.ws.tensor(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 1, i32 0, i32 3) + + call void 
@llvm.nvvm.tcgen05.mma.ws.tensor(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 1, i32 0, i32 3) + + call void @llvm.nvvm.tcgen05.mma.ws.tensor(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 1, i32 0, i32 3) + + call void @llvm.nvvm.tcgen05.mma.ws.tensor(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 1, i32 0, i32 3) + ret void +} + +define void @tcgen05_mma_ws_f8f6f4(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d) { +; CHECK-LABEL: tcgen05_mma_ws_f8f6f4( +; CHECK: { +; CHECK-NEXT: .reg .pred %p<2>; +; CHECK-NEXT: .reg .b16 %rs<3>; +; CHECK-NEXT: .reg .b32 %r<4>; +; CHECK-NEXT: .reg .b64 %rd<3>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.b8 %rs1, [tcgen05_mma_ws_f8f6f4_param_5]; +; CHECK-NEXT: and.b16 %rs2, %rs1, 1; +; CHECK-NEXT: setp.ne.b16 %p1, %rs2, 0; +; CHECK-NEXT: ld.param.b32 %r1, [tcgen05_mma_ws_f8f6f4_param_0]; +; CHECK-NEXT: ld.param.b64 %rd1, [tcgen05_mma_ws_f8f6f4_param_2]; +; CHECK-NEXT: ld.param.b64 %rd2, [tcgen05_mma_ws_f8f6f4_param_3]; +; CHECK-NEXT: ld.param.b32 %r2, [tcgen05_mma_ws_f8f6f4_param_4]; +; CHECK-NEXT: tcgen05.mma.ws.cta_group::1.kind::f8f6f4.collector::b0::discard [%r1], %rd1, %rd2, %r2, %p1; +; CHECK-NEXT: ld.param.b32 %r3, [tcgen05_mma_ws_f8f6f4_param_1]; +; CHECK-NEXT: tcgen05.mma.ws.cta_group::1.kind::f8f6f4.collector::b0::discard [%r1], %rd1, %rd2, %r2, %p1; +; CHECK-NEXT: tcgen05.mma.ws.cta_group::1.kind::f8f6f4.collector::b0::discard [%r1], [%r3], %rd2, %r2, %p1; +; CHECK-NEXT: tcgen05.mma.ws.cta_group::1.kind::f8f6f4.collector::b0::discard [%r1], [%r3], %rd2, %r2, %p1; +; CHECK-NEXT: tcgen05.mma.ws.cta_group::1.kind::f8f6f4.collector::b0::discard [%r1], [%r3], %rd2, %r2, %p1; +; CHECK-NEXT: tcgen05.mma.ws.cta_group::1.kind::f8f6f4.collector::b0::lastuse [%r1], %rd1, %rd2, %r2, %p1; +; CHECK-NEXT: tcgen05.mma.ws.cta_group::1.kind::f8f6f4.collector::b0::discard [%r1], [%r3], %rd2, %r2, %p1; +; CHECK-NEXT: tcgen05.mma.ws.cta_group::1.kind::f8f6f4.collector::b0::lastuse [%r1], %rd1, %rd2, %r2, %p1; +; CHECK-NEXT: tcgen05.mma.ws.cta_group::1.kind::f8f6f4.collector::b0::lastuse [%r1], [%r3], %rd2, %r2, %p1; +; CHECK-NEXT: tcgen05.mma.ws.cta_group::1.kind::f8f6f4.collector::b0::fill [%r1], [%r3], %rd2, %r2, %p1; +; CHECK-NEXT: tcgen05.mma.ws.cta_group::1.kind::f8f6f4.collector::b0::fill [%r1], %rd1, %rd2, %r2, %p1; +; CHECK-NEXT: tcgen05.mma.ws.cta_group::1.kind::f8f6f4.collector::b0::fill [%r1], %rd1, %rd2, %r2, %p1; +; CHECK-NEXT: tcgen05.mma.ws.cta_group::1.kind::f8f6f4.collector::b0::fill [%r1], [%r3], %rd2, %r2, %p1; +; CHECK-NEXT: tcgen05.mma.ws.cta_group::1.kind::f8f6f4.collector::b0::fill [%r1], [%r3], %rd2, %r2, %p1; +; CHECK-NEXT: tcgen05.mma.ws.cta_group::1.kind::f8f6f4.collector::b0::use [%r1], %rd1, %rd2, %r2, %p1; +; CHECK-NEXT: tcgen05.mma.ws.cta_group::1.kind::f8f6f4.collector::b0::use [%r1], %rd1, %rd2, %r2, %p1; +; CHECK-NEXT: tcgen05.mma.ws.cta_group::1.kind::f8f6f4.collector::b0::use [%r1], [%r3], %rd2, %r2, %p1; +; CHECK-NEXT: tcgen05.mma.ws.cta_group::1.kind::f8f6f4.collector::b0::use [%r1], [%r3], %rd2, %r2, %p1; +; CHECK-NEXT: tcgen05.mma.ws.cta_group::1.kind::f8f6f4.collector::b0::use [%r1], [%r3], %rd2, %r2, %p1; +; CHECK-NEXT: tcgen05.mma.ws.cta_group::1.kind::f8f6f4.collector::b0::use [%r1], [%r3], %rd2, %r2, %p1; +; CHECK-NEXT: ret; + call void @llvm.nvvm.tcgen05.mma.ws.shared(ptr addrspace(6) %dtmem, i64 %ashared, 
i64 %b, i32 %idesc, i1 %enable_inp_d, i32 2, i32 0, i32 0) + + call void @llvm.nvvm.tcgen05.mma.ws.shared(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 2, i32 0, i32 0) + + call void @llvm.nvvm.tcgen05.mma.ws.tensor(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 2, i32 0, i32 0) + + call void @llvm.nvvm.tcgen05.mma.ws.tensor(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 2, i32 0, i32 0) + + call void @llvm.nvvm.tcgen05.mma.ws.tensor(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 2, i32 0, i32 0) + + call void @llvm.nvvm.tcgen05.mma.ws.shared(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 2, i32 0, i32 1) + + call void @llvm.nvvm.tcgen05.mma.ws.tensor(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 2, i32 0, i32 0) + + call void @llvm.nvvm.tcgen05.mma.ws.shared(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 2, i32 0, i32 1) + + call void @llvm.nvvm.tcgen05.mma.ws.tensor(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 2, i32 0, i32 1) + + call void @llvm.nvvm.tcgen05.mma.ws.tensor(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 2, i32 0, i32 2) + + call void @llvm.nvvm.tcgen05.mma.ws.shared(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 2, i32 0, i32 2) + + call void @llvm.nvvm.tcgen05.mma.ws.shared(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 2, i32 0, i32 2) + + call void @llvm.nvvm.tcgen05.mma.ws.tensor(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 2, i32 0, i32 2) + + call void @llvm.nvvm.tcgen05.mma.ws.tensor(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 2, i32 0, i32 2) + + call void @llvm.nvvm.tcgen05.mma.ws.shared(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 2, i32 0, i32 3) + + call void @llvm.nvvm.tcgen05.mma.ws.shared(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 2, i32 0, i32 3) + + call void @llvm.nvvm.tcgen05.mma.ws.tensor(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 2, i32 0, i32 3) + + call void @llvm.nvvm.tcgen05.mma.ws.tensor(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 2, i32 0, i32 3) + + call void @llvm.nvvm.tcgen05.mma.ws.tensor(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 2, i32 0, i32 3) + + call void @llvm.nvvm.tcgen05.mma.ws.tensor(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 2, i32 0, i32 3) + ret void +} + +define void @tcgen05_mma_ws_i8(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d) { +; CHECK-LABEL: tcgen05_mma_ws_i8( +; CHECK: { +; CHECK-NEXT: .reg .pred %p<2>; +; CHECK-NEXT: .reg .b16 %rs<3>; +; CHECK-NEXT: .reg .b32 %r<4>; +; CHECK-NEXT: .reg .b64 %rd<3>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.b8 %rs1, [tcgen05_mma_ws_i8_param_5]; +; CHECK-NEXT: and.b16 %rs2, %rs1, 1; +; CHECK-NEXT: setp.ne.b16 %p1, %rs2, 0; +; CHECK-NEXT: ld.param.b32 %r1, [tcgen05_mma_ws_i8_param_0]; +; 
CHECK-NEXT: ld.param.b64 %rd1, [tcgen05_mma_ws_i8_param_2]; +; CHECK-NEXT: ld.param.b64 %rd2, [tcgen05_mma_ws_i8_param_3]; +; CHECK-NEXT: ld.param.b32 %r2, [tcgen05_mma_ws_i8_param_4]; +; CHECK-NEXT: tcgen05.mma.ws.cta_group::1.kind::i8.collector::b0::discard [%r1], %rd1, %rd2, %r2, %p1; +; CHECK-NEXT: ld.param.b32 %r3, [tcgen05_mma_ws_i8_param_1]; +; CHECK-NEXT: tcgen05.mma.ws.cta_group::1.kind::i8.collector::b0::discard [%r1], %rd1, %rd2, %r2, %p1; +; CHECK-NEXT: tcgen05.mma.ws.cta_group::1.kind::i8.collector::b0::discard [%r1], [%r3], %rd2, %r2, %p1; +; CHECK-NEXT: tcgen05.mma.ws.cta_group::1.kind::i8.collector::b0::discard [%r1], [%r3], %rd2, %r2, %p1; +; CHECK-NEXT: tcgen05.mma.ws.cta_group::1.kind::i8.collector::b0::discard [%r1], [%r3], %rd2, %r2, %p1; +; CHECK-NEXT: tcgen05.mma.ws.cta_group::1.kind::i8.collector::b0::lastuse [%r1], %rd1, %rd2, %r2, %p1; +; CHECK-NEXT: tcgen05.mma.ws.cta_group::1.kind::i8.collector::b0::discard [%r1], [%r3], %rd2, %r2, %p1; +; CHECK-NEXT: tcgen05.mma.ws.cta_group::1.kind::i8.collector::b0::lastuse [%r1], %rd1, %rd2, %r2, %p1; +; CHECK-NEXT: tcgen05.mma.ws.cta_group::1.kind::i8.collector::b0::lastuse [%r1], [%r3], %rd2, %r2, %p1; +; CHECK-NEXT: tcgen05.mma.ws.cta_group::1.kind::i8.collector::b0::fill [%r1], [%r3], %rd2, %r2, %p1; +; CHECK-NEXT: tcgen05.mma.ws.cta_group::1.kind::i8.collector::b0::fill [%r1], %rd1, %rd2, %r2, %p1; +; CHECK-NEXT: tcgen05.mma.ws.cta_group::1.kind::i8.collector::b0::fill [%r1], %rd1, %rd2, %r2, %p1; +; CHECK-NEXT: tcgen05.mma.ws.cta_group::1.kind::i8.collector::b0::fill [%r1], [%r3], %rd2, %r2, %p1; +; CHECK-NEXT: tcgen05.mma.ws.cta_group::1.kind::i8.collector::b0::fill [%r1], [%r3], %rd2, %r2, %p1; +; CHECK-NEXT: tcgen05.mma.ws.cta_group::1.kind::i8.collector::b0::use [%r1], %rd1, %rd2, %r2, %p1; +; CHECK-NEXT: tcgen05.mma.ws.cta_group::1.kind::i8.collector::b0::use [%r1], %rd1, %rd2, %r2, %p1; +; CHECK-NEXT: tcgen05.mma.ws.cta_group::1.kind::i8.collector::b0::use [%r1], [%r3], %rd2, %r2, %p1; +; CHECK-NEXT: tcgen05.mma.ws.cta_group::1.kind::i8.collector::b0::use [%r1], [%r3], %rd2, %r2, %p1; +; CHECK-NEXT: tcgen05.mma.ws.cta_group::1.kind::i8.collector::b0::use [%r1], [%r3], %rd2, %r2, %p1; +; CHECK-NEXT: tcgen05.mma.ws.cta_group::1.kind::i8.collector::b0::use [%r1], [%r3], %rd2, %r2, %p1; +; CHECK-NEXT: ret; + call void @llvm.nvvm.tcgen05.mma.ws.shared(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 3, i32 0, i32 0) + + call void @llvm.nvvm.tcgen05.mma.ws.shared(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 3, i32 0, i32 0) + + call void @llvm.nvvm.tcgen05.mma.ws.tensor(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 3, i32 0, i32 0) + + call void @llvm.nvvm.tcgen05.mma.ws.tensor(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 3, i32 0, i32 0) + + call void @llvm.nvvm.tcgen05.mma.ws.tensor(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 3, i32 0, i32 0) + + call void @llvm.nvvm.tcgen05.mma.ws.shared(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 3, i32 0, i32 1) + + call void @llvm.nvvm.tcgen05.mma.ws.tensor(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 3, i32 0, i32 0) + + call void @llvm.nvvm.tcgen05.mma.ws.shared(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 3, i32 0, i32 1) + + call void 
@llvm.nvvm.tcgen05.mma.ws.tensor(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 3, i32 0, i32 1) + + call void @llvm.nvvm.tcgen05.mma.ws.tensor(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 3, i32 0, i32 2) + + call void @llvm.nvvm.tcgen05.mma.ws.shared(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 3, i32 0, i32 2) + + call void @llvm.nvvm.tcgen05.mma.ws.shared(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 3, i32 0, i32 2) + + call void @llvm.nvvm.tcgen05.mma.ws.tensor(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 3, i32 0, i32 2) + + call void @llvm.nvvm.tcgen05.mma.ws.tensor(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 3, i32 0, i32 2) + + call void @llvm.nvvm.tcgen05.mma.ws.shared(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 3, i32 0, i32 3) + + call void @llvm.nvvm.tcgen05.mma.ws.shared(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 3, i32 0, i32 3) + + call void @llvm.nvvm.tcgen05.mma.ws.tensor(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 3, i32 0, i32 3) + + call void @llvm.nvvm.tcgen05.mma.ws.tensor(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 3, i32 0, i32 3) + + call void @llvm.nvvm.tcgen05.mma.ws.tensor(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 3, i32 0, i32 3) + + call void @llvm.nvvm.tcgen05.mma.ws.tensor(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 3, i32 0, i32 3) + ret void +} diff --git a/llvm/test/CodeGen/NVPTX/tcgen05-mma.ll b/llvm/test/CodeGen/NVPTX/tcgen05-mma.ll new file mode 100644 index 0000000000000..0a43bd5f91fed --- /dev/null +++ b/llvm/test/CodeGen/NVPTX/tcgen05-mma.ll @@ -0,0 +1,601 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 +; RUN: llc < %s -o - -mcpu=sm_100a -march=nvptx64 -mattr=+ptx86 | FileCheck %s +; RUN: llc < %s -o - -mcpu=sm_101a -march=nvptx64 -mattr=+ptx86 | FileCheck %s +; RUN: %if ptxas-12.8 %{ llc < %s -march=nvptx64 -mattr=+ptx86 -mcpu=sm_100a | %ptxas-verify -arch=sm_100a %} +; RUN: %if ptxas-12.8 %{ llc < %s -march=nvptx64 -mattr=+ptx86 -mcpu=sm_101a | %ptxas-verify -arch=sm_101a %} + +define void @tcgen05_mma_fp16(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d) { +; CHECK-LABEL: tcgen05_mma_fp16( +; CHECK: { +; CHECK-NEXT: .reg .pred %p<2>; +; CHECK-NEXT: .reg .b16 %rs<3>; +; CHECK-NEXT: .reg .b32 %r<4>; +; CHECK-NEXT: .reg .b64 %rd<3>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.b8 %rs1, [tcgen05_mma_fp16_param_5]; +; CHECK-NEXT: and.b16 %rs2, %rs1, 1; +; CHECK-NEXT: setp.ne.b16 %p1, %rs2, 0; +; CHECK-NEXT: ld.param.b32 %r1, [tcgen05_mma_fp16_param_0]; +; CHECK-NEXT: ld.param.b64 %rd1, [tcgen05_mma_fp16_param_2]; +; CHECK-NEXT: ld.param.b64 %rd2, [tcgen05_mma_fp16_param_3]; +; CHECK-NEXT: ld.param.b32 %r2, [tcgen05_mma_fp16_param_4]; +; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::f16.collector::a::discard [%r1], %rd1, %rd2, %r2, %p1; +; CHECK-NEXT: ld.param.b32 %r3, [tcgen05_mma_fp16_param_1]; +; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::f16.collector::a::discard [%r1], %rd1, %rd2, %r2, 
%p1; +; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::f16.collector::a::discard [%r1], [%r3], %rd2, %r2, %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::f16.collector::a::discard [%r1], [%r3], %rd2, %r2, %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::f16.collector::a::discard.ashift [%r1], [%r3], %rd2, %r2, %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::f16.collector::a::lastuse [%r1], %rd1, %rd2, %r2, %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::f16.collector::a::discard.ashift [%r1], [%r3], %rd2, %r2, %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::f16.collector::a::lastuse [%r1], %rd1, %rd2, %r2, %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::f16.collector::a::lastuse [%r1], [%r3], %rd2, %r2, %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::f16.collector::a::lastuse [%r1], [%r3], %rd2, %r2, %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::f16.collector::a::fill [%r1], %rd1, %rd2, %r2, %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::f16.collector::a::fill [%r1], %rd1, %rd2, %r2, %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::f16.collector::a::fill [%r1], [%r3], %rd2, %r2, %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::f16.collector::a::fill [%r1], [%r3], %rd2, %r2, %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::f16.collector::a::use [%r1], %rd1, %rd2, %r2, %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::f16.collector::a::use [%r1], %rd1, %rd2, %r2, %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::f16.collector::a::use [%r1], [%r3], %rd2, %r2, %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::f16.collector::a::use [%r1], [%r3], %rd2, %r2, %p1; +; CHECK-NEXT: ret; + call void @llvm.nvvm.tcgen05.mma.shared(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 0, i32 1, i32 0) + + call void @llvm.nvvm.tcgen05.mma.shared(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 0, i32 2, i32 0) + + call void @llvm.nvvm.tcgen05.mma.tensor(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 0, i32 1, i32 0) + + call void @llvm.nvvm.tcgen05.mma.tensor(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 0, i32 2, i32 0) + + call void @llvm.nvvm.tcgen05.mma.tensor.ashift(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 0, i32 1, i32 0) + + call void @llvm.nvvm.tcgen05.mma.shared(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 0, i32 1, i32 1) + + call void @llvm.nvvm.tcgen05.mma.tensor.ashift(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 0, i32 2, i32 0) + + call void @llvm.nvvm.tcgen05.mma.shared(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 0, i32 2, i32 1) + + call void @llvm.nvvm.tcgen05.mma.tensor(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 0, i32 1, i32 1) + + call void @llvm.nvvm.tcgen05.mma.tensor(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 0, i32 2, i32 1) + + call void @llvm.nvvm.tcgen05.mma.shared(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 0, i32 1, i32 2) + + call void @llvm.nvvm.tcgen05.mma.shared(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 0, i32 2, i32 2) + + call void @llvm.nvvm.tcgen05.mma.tensor(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 
%enable_inp_d, i32 0, i32 1, i32 2) + + call void @llvm.nvvm.tcgen05.mma.tensor(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 0, i32 2, i32 2) + + call void @llvm.nvvm.tcgen05.mma.shared(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 0, i32 1, i32 3) + + call void @llvm.nvvm.tcgen05.mma.shared(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 0, i32 2, i32 3) + + call void @llvm.nvvm.tcgen05.mma.tensor(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 0, i32 1, i32 3) + + call void @llvm.nvvm.tcgen05.mma.tensor(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 0, i32 2, i32 3) + ret void +} + +define void @tcgen05_mma_sp_fp16(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata) { +; CHECK-LABEL: tcgen05_mma_sp_fp16( +; CHECK: { +; CHECK-NEXT: .reg .pred %p<2>; +; CHECK-NEXT: .reg .b16 %rs<3>; +; CHECK-NEXT: .reg .b32 %r<5>; +; CHECK-NEXT: .reg .b64 %rd<3>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.b8 %rs1, [tcgen05_mma_sp_fp16_param_5]; +; CHECK-NEXT: and.b16 %rs2, %rs1, 1; +; CHECK-NEXT: setp.ne.b16 %p1, %rs2, 0; +; CHECK-NEXT: ld.param.b32 %r1, [tcgen05_mma_sp_fp16_param_0]; +; CHECK-NEXT: ld.param.b64 %rd1, [tcgen05_mma_sp_fp16_param_2]; +; CHECK-NEXT: ld.param.b64 %rd2, [tcgen05_mma_sp_fp16_param_3]; +; CHECK-NEXT: ld.param.b32 %r2, [tcgen05_mma_sp_fp16_param_4]; +; CHECK-NEXT: ld.param.b32 %r3, [tcgen05_mma_sp_fp16_param_6]; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::1.kind::f16.collector::a::discard [%r1], %rd1, %rd2, [%r3], %r2, %p1; +; CHECK-NEXT: ld.param.b32 %r4, [tcgen05_mma_sp_fp16_param_1]; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::2.kind::f16.collector::a::discard [%r1], %rd1, %rd2, [%r3], %r2, %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::1.kind::f16.collector::a::discard [%r1], [%r4], %rd2, [%r3], %r2, %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::2.kind::f16.collector::a::discard [%r1], [%r4], %rd2, [%r3], %r2, %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::1.kind::f16.collector::a::discard.ashift [%r1], [%r4], %rd2, [%r3], %r2, %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::1.kind::f16.collector::a::lastuse [%r1], %rd1, %rd2, [%r3], %r2, %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::2.kind::f16.collector::a::discard.ashift [%r1], [%r4], %rd2, [%r3], %r2, %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::2.kind::f16.collector::a::lastuse [%r1], %rd1, %rd2, [%r3], %r2, %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::1.kind::f16.collector::a::lastuse [%r1], [%r4], %rd2, [%r3], %r2, %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::2.kind::f16.collector::a::lastuse [%r1], [%r4], %rd2, [%r3], %r2, %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::1.kind::f16.collector::a::fill [%r1], %rd1, %rd2, [%r3], %r2, %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::2.kind::f16.collector::a::fill [%r1], %rd1, %rd2, [%r3], %r2, %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::1.kind::f16.collector::a::fill [%r1], [%r4], %rd2, [%r3], %r2, %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::2.kind::f16.collector::a::fill [%r1], [%r4], %rd2, [%r3], %r2, %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::1.kind::f16.collector::a::use [%r1], %rd1, %rd2, [%r3], %r2, %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::2.kind::f16.collector::a::use [%r1], %rd1, %rd2, [%r3], %r2, %p1; +; CHECK-NEXT: 
tcgen05.mma.sp.cta_group::1.kind::f16.collector::a::use [%r1], [%r4], %rd2, [%r3], %r2, %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::2.kind::f16.collector::a::use [%r1], [%r4], %rd2, [%r3], %r2, %p1; +; CHECK-NEXT: ret; + call void @llvm.nvvm.tcgen05.mma.sp.shared(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, i32 0, i32 1, i32 0) + + call void @llvm.nvvm.tcgen05.mma.sp.shared(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, i32 0, i32 2, i32 0) + + call void @llvm.nvvm.tcgen05.mma.sp.tensor(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, i32 0, i32 1, i32 0) + + call void @llvm.nvvm.tcgen05.mma.sp.tensor(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, i32 0, i32 2, i32 0) + + call void @llvm.nvvm.tcgen05.mma.sp.tensor.ashift(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, i32 0, i32 1, i32 0) + + call void @llvm.nvvm.tcgen05.mma.sp.shared(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, i32 0, i32 1, i32 1) + + call void @llvm.nvvm.tcgen05.mma.sp.tensor.ashift(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, i32 0, i32 2, i32 0) + + call void @llvm.nvvm.tcgen05.mma.sp.shared(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, i32 0, i32 2, i32 1) + + call void @llvm.nvvm.tcgen05.mma.sp.tensor(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, i32 0, i32 1, i32 1) + + call void @llvm.nvvm.tcgen05.mma.sp.tensor(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, i32 0, i32 2, i32 1) + + call void @llvm.nvvm.tcgen05.mma.sp.shared(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, i32 0, i32 1, i32 2) + + call void @llvm.nvvm.tcgen05.mma.sp.shared(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, i32 0, i32 2, i32 2) + + call void @llvm.nvvm.tcgen05.mma.sp.tensor(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, i32 0, i32 1, i32 2) + + call void @llvm.nvvm.tcgen05.mma.sp.tensor(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, i32 0, i32 2, i32 2) + + call void @llvm.nvvm.tcgen05.mma.sp.shared(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, i32 0, i32 1, i32 3) + + call void @llvm.nvvm.tcgen05.mma.sp.shared(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, i32 0, i32 2, i32 3) + + call void @llvm.nvvm.tcgen05.mma.sp.tensor(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, i32 0, i32 1, i32 3) + + call void @llvm.nvvm.tcgen05.mma.sp.tensor(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, i32 0, i32 2, i32 3) + ret void +} + +define void 
@tcgen05_mma_tf32(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d) { +; CHECK-LABEL: tcgen05_mma_tf32( +; CHECK: { +; CHECK-NEXT: .reg .pred %p<2>; +; CHECK-NEXT: .reg .b16 %rs<3>; +; CHECK-NEXT: .reg .b32 %r<4>; +; CHECK-NEXT: .reg .b64 %rd<3>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.b8 %rs1, [tcgen05_mma_tf32_param_5]; +; CHECK-NEXT: and.b16 %rs2, %rs1, 1; +; CHECK-NEXT: setp.ne.b16 %p1, %rs2, 0; +; CHECK-NEXT: ld.param.b32 %r1, [tcgen05_mma_tf32_param_0]; +; CHECK-NEXT: ld.param.b64 %rd1, [tcgen05_mma_tf32_param_2]; +; CHECK-NEXT: ld.param.b64 %rd2, [tcgen05_mma_tf32_param_3]; +; CHECK-NEXT: ld.param.b32 %r2, [tcgen05_mma_tf32_param_4]; +; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::tf32.collector::a::discard [%r1], %rd1, %rd2, %r2, %p1; +; CHECK-NEXT: ld.param.b32 %r3, [tcgen05_mma_tf32_param_1]; +; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::tf32.collector::a::discard [%r1], %rd1, %rd2, %r2, %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::tf32.collector::a::discard [%r1], [%r3], %rd2, %r2, %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::tf32.collector::a::discard [%r1], [%r3], %rd2, %r2, %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::tf32.collector::a::discard.ashift [%r1], [%r3], %rd2, %r2, %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::tf32.collector::a::lastuse [%r1], %rd1, %rd2, %r2, %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::tf32.collector::a::discard.ashift [%r1], [%r3], %rd2, %r2, %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::tf32.collector::a::lastuse [%r1], %rd1, %rd2, %r2, %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::tf32.collector::a::lastuse [%r1], [%r3], %rd2, %r2, %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::tf32.collector::a::lastuse [%r1], [%r3], %rd2, %r2, %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::tf32.collector::a::fill [%r1], %rd1, %rd2, %r2, %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::tf32.collector::a::fill [%r1], %rd1, %rd2, %r2, %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::tf32.collector::a::fill [%r1], [%r3], %rd2, %r2, %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::tf32.collector::a::fill [%r1], [%r3], %rd2, %r2, %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::tf32.collector::a::use [%r1], %rd1, %rd2, %r2, %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::tf32.collector::a::use [%r1], %rd1, %rd2, %r2, %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::tf32.collector::a::use [%r1], [%r3], %rd2, %r2, %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::tf32.collector::a::use [%r1], [%r3], %rd2, %r2, %p1; +; CHECK-NEXT: ret; + call void @llvm.nvvm.tcgen05.mma.shared(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 1, i32 1, i32 0) + + call void @llvm.nvvm.tcgen05.mma.shared(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 1, i32 2, i32 0) + + call void @llvm.nvvm.tcgen05.mma.tensor(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 1, i32 1, i32 0) + + call void @llvm.nvvm.tcgen05.mma.tensor(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 1, i32 2, i32 0) + + call void @llvm.nvvm.tcgen05.mma.tensor.ashift(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 1, i32 1, i32 0) + + call void @llvm.nvvm.tcgen05.mma.shared(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 1, i32 1, i32 1) + + call void 
@llvm.nvvm.tcgen05.mma.tensor.ashift(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 1, i32 2, i32 0) + + call void @llvm.nvvm.tcgen05.mma.shared(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 1, i32 2, i32 1) + + call void @llvm.nvvm.tcgen05.mma.tensor(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 1, i32 1, i32 1) + + call void @llvm.nvvm.tcgen05.mma.tensor(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 1, i32 2, i32 1) + + call void @llvm.nvvm.tcgen05.mma.shared(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 1, i32 1, i32 2) + + call void @llvm.nvvm.tcgen05.mma.shared(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 1, i32 2, i32 2) + + call void @llvm.nvvm.tcgen05.mma.tensor(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 1, i32 1, i32 2) + + call void @llvm.nvvm.tcgen05.mma.tensor(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 1, i32 2, i32 2) + + call void @llvm.nvvm.tcgen05.mma.shared(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 1, i32 1, i32 3) + + call void @llvm.nvvm.tcgen05.mma.shared(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 1, i32 2, i32 3) + + call void @llvm.nvvm.tcgen05.mma.tensor(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 1, i32 1, i32 3) + + call void @llvm.nvvm.tcgen05.mma.tensor(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 1, i32 2, i32 3) + ret void +} + +define void @tcgen05_mma_sp_tf32(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata) { +; CHECK-LABEL: tcgen05_mma_sp_tf32( +; CHECK: { +; CHECK-NEXT: .reg .pred %p<2>; +; CHECK-NEXT: .reg .b16 %rs<3>; +; CHECK-NEXT: .reg .b32 %r<5>; +; CHECK-NEXT: .reg .b64 %rd<3>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.b8 %rs1, [tcgen05_mma_sp_tf32_param_5]; +; CHECK-NEXT: and.b16 %rs2, %rs1, 1; +; CHECK-NEXT: setp.ne.b16 %p1, %rs2, 0; +; CHECK-NEXT: ld.param.b32 %r1, [tcgen05_mma_sp_tf32_param_0]; +; CHECK-NEXT: ld.param.b64 %rd1, [tcgen05_mma_sp_tf32_param_2]; +; CHECK-NEXT: ld.param.b64 %rd2, [tcgen05_mma_sp_tf32_param_3]; +; CHECK-NEXT: ld.param.b32 %r2, [tcgen05_mma_sp_tf32_param_4]; +; CHECK-NEXT: ld.param.b32 %r3, [tcgen05_mma_sp_tf32_param_6]; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::1.kind::tf32.collector::a::discard [%r1], %rd1, %rd2, [%r3], %r2, %p1; +; CHECK-NEXT: ld.param.b32 %r4, [tcgen05_mma_sp_tf32_param_1]; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::2.kind::tf32.collector::a::discard [%r1], %rd1, %rd2, [%r3], %r2, %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::1.kind::tf32.collector::a::discard [%r1], [%r4], %rd2, [%r3], %r2, %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::2.kind::tf32.collector::a::discard [%r1], [%r4], %rd2, [%r3], %r2, %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::1.kind::tf32.collector::a::discard.ashift [%r1], [%r4], %rd2, [%r3], %r2, %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::1.kind::tf32.collector::a::lastuse [%r1], %rd1, %rd2, [%r3], %r2, %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::2.kind::tf32.collector::a::discard.ashift [%r1], [%r4], %rd2, [%r3], %r2, %p1; +; CHECK-NEXT: 
tcgen05.mma.sp.cta_group::2.kind::tf32.collector::a::lastuse [%r1], %rd1, %rd2, [%r3], %r2, %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::1.kind::tf32.collector::a::lastuse [%r1], [%r4], %rd2, [%r3], %r2, %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::2.kind::tf32.collector::a::lastuse [%r1], [%r4], %rd2, [%r3], %r2, %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::1.kind::tf32.collector::a::fill [%r1], %rd1, %rd2, [%r3], %r2, %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::2.kind::tf32.collector::a::fill [%r1], %rd1, %rd2, [%r3], %r2, %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::1.kind::tf32.collector::a::fill [%r1], [%r4], %rd2, [%r3], %r2, %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::2.kind::tf32.collector::a::fill [%r1], [%r4], %rd2, [%r3], %r2, %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::1.kind::tf32.collector::a::use [%r1], %rd1, %rd2, [%r3], %r2, %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::2.kind::tf32.collector::a::use [%r1], %rd1, %rd2, [%r3], %r2, %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::1.kind::tf32.collector::a::use [%r1], [%r4], %rd2, [%r3], %r2, %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::2.kind::tf32.collector::a::use [%r1], [%r4], %rd2, [%r3], %r2, %p1; +; CHECK-NEXT: ret; + call void @llvm.nvvm.tcgen05.mma.sp.shared(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, i32 1, i32 1, i32 0) + + call void @llvm.nvvm.tcgen05.mma.sp.shared(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, i32 1, i32 2, i32 0) + + call void @llvm.nvvm.tcgen05.mma.sp.tensor(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, i32 1, i32 1, i32 0) + + call void @llvm.nvvm.tcgen05.mma.sp.tensor(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, i32 1, i32 2, i32 0) + + call void @llvm.nvvm.tcgen05.mma.sp.tensor.ashift(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, i32 1, i32 1, i32 0) + + call void @llvm.nvvm.tcgen05.mma.sp.shared(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, i32 1, i32 1, i32 1) + + call void @llvm.nvvm.tcgen05.mma.sp.tensor.ashift(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, i32 1, i32 2, i32 0) + + call void @llvm.nvvm.tcgen05.mma.sp.shared(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, i32 1, i32 2, i32 1) + + call void @llvm.nvvm.tcgen05.mma.sp.tensor(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, i32 1, i32 1, i32 1) + + call void @llvm.nvvm.tcgen05.mma.sp.tensor(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, i32 1, i32 2, i32 1) + + call void @llvm.nvvm.tcgen05.mma.sp.shared(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, i32 1, i32 1, i32 2) + + call void @llvm.nvvm.tcgen05.mma.sp.shared(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, i32 1, i32 2, i32 2) + + call void @llvm.nvvm.tcgen05.mma.sp.tensor(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr 
addrspace(6) %spmetadata, i32 1, i32 1, i32 2) + + call void @llvm.nvvm.tcgen05.mma.sp.tensor(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, i32 1, i32 2, i32 2) + + call void @llvm.nvvm.tcgen05.mma.sp.shared(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, i32 1, i32 1, i32 3) + + call void @llvm.nvvm.tcgen05.mma.sp.shared(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, i32 1, i32 2, i32 3) + + call void @llvm.nvvm.tcgen05.mma.sp.tensor(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, i32 1, i32 1, i32 3) + + call void @llvm.nvvm.tcgen05.mma.sp.tensor(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, i32 1, i32 2, i32 3) + ret void +} + +define void @tcgen05_mma_f8f6f4(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d) { +; CHECK-LABEL: tcgen05_mma_f8f6f4( +; CHECK: { +; CHECK-NEXT: .reg .pred %p<2>; +; CHECK-NEXT: .reg .b16 %rs<3>; +; CHECK-NEXT: .reg .b32 %r<4>; +; CHECK-NEXT: .reg .b64 %rd<3>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.b8 %rs1, [tcgen05_mma_f8f6f4_param_5]; +; CHECK-NEXT: and.b16 %rs2, %rs1, 1; +; CHECK-NEXT: setp.ne.b16 %p1, %rs2, 0; +; CHECK-NEXT: ld.param.b32 %r1, [tcgen05_mma_f8f6f4_param_0]; +; CHECK-NEXT: ld.param.b64 %rd1, [tcgen05_mma_f8f6f4_param_2]; +; CHECK-NEXT: ld.param.b64 %rd2, [tcgen05_mma_f8f6f4_param_3]; +; CHECK-NEXT: ld.param.b32 %r2, [tcgen05_mma_f8f6f4_param_4]; +; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::f8f6f4.collector::a::discard [%r1], %rd1, %rd2, %r2, %p1; +; CHECK-NEXT: ld.param.b32 %r3, [tcgen05_mma_f8f6f4_param_1]; +; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::f8f6f4.collector::a::discard [%r1], %rd1, %rd2, %r2, %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::f8f6f4.collector::a::discard [%r1], [%r3], %rd2, %r2, %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::f8f6f4.collector::a::discard [%r1], [%r3], %rd2, %r2, %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::f8f6f4.collector::a::discard.ashift [%r1], [%r3], %rd2, %r2, %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::f8f6f4.collector::a::lastuse [%r1], %rd1, %rd2, %r2, %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::f8f6f4.collector::a::discard.ashift [%r1], [%r3], %rd2, %r2, %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::f8f6f4.collector::a::lastuse [%r1], %rd1, %rd2, %r2, %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::f8f6f4.collector::a::lastuse [%r1], [%r3], %rd2, %r2, %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::f8f6f4.collector::a::lastuse [%r1], [%r3], %rd2, %r2, %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::f8f6f4.collector::a::fill [%r1], %rd1, %rd2, %r2, %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::f8f6f4.collector::a::fill [%r1], %rd1, %rd2, %r2, %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::f8f6f4.collector::a::fill [%r1], [%r3], %rd2, %r2, %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::f8f6f4.collector::a::fill [%r1], [%r3], %rd2, %r2, %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::f8f6f4.collector::a::use [%r1], %rd1, %rd2, %r2, %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::f8f6f4.collector::a::use [%r1], %rd1, %rd2, %r2, %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::f8f6f4.collector::a::use [%r1], [%r3], %rd2, 
%r2, %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::f8f6f4.collector::a::use [%r1], [%r3], %rd2, %r2, %p1; +; CHECK-NEXT: ret; + call void @llvm.nvvm.tcgen05.mma.shared(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 2, i32 1, i32 0) + + call void @llvm.nvvm.tcgen05.mma.shared(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 2, i32 2, i32 0) + + call void @llvm.nvvm.tcgen05.mma.tensor(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 2, i32 1, i32 0) + + call void @llvm.nvvm.tcgen05.mma.tensor(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 2, i32 2, i32 0) + + call void @llvm.nvvm.tcgen05.mma.tensor.ashift(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 2, i32 1, i32 0) + + call void @llvm.nvvm.tcgen05.mma.shared(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 2, i32 1, i32 1) + + call void @llvm.nvvm.tcgen05.mma.tensor.ashift(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 2, i32 2, i32 0) + + call void @llvm.nvvm.tcgen05.mma.shared(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 2, i32 2, i32 1) + + call void @llvm.nvvm.tcgen05.mma.tensor(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 2, i32 1, i32 1) + + call void @llvm.nvvm.tcgen05.mma.tensor(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 2, i32 2, i32 1) + + call void @llvm.nvvm.tcgen05.mma.shared(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 2, i32 1, i32 2) + + call void @llvm.nvvm.tcgen05.mma.shared(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 2, i32 2, i32 2) + + call void @llvm.nvvm.tcgen05.mma.tensor(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 2, i32 1, i32 2) + + call void @llvm.nvvm.tcgen05.mma.tensor(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 2, i32 2, i32 2) + + call void @llvm.nvvm.tcgen05.mma.shared(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 2, i32 1, i32 3) + + call void @llvm.nvvm.tcgen05.mma.shared(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 2, i32 2, i32 3) + + call void @llvm.nvvm.tcgen05.mma.tensor(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 2, i32 1, i32 3) + + call void @llvm.nvvm.tcgen05.mma.tensor(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 2, i32 2, i32 3) + ret void +} + +define void @tcgen05_mma_sp_f8f6f4(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata) { +; CHECK-LABEL: tcgen05_mma_sp_f8f6f4( +; CHECK: { +; CHECK-NEXT: .reg .pred %p<2>; +; CHECK-NEXT: .reg .b16 %rs<3>; +; CHECK-NEXT: .reg .b32 %r<5>; +; CHECK-NEXT: .reg .b64 %rd<3>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.b8 %rs1, [tcgen05_mma_sp_f8f6f4_param_5]; +; CHECK-NEXT: and.b16 %rs2, %rs1, 1; +; CHECK-NEXT: setp.ne.b16 %p1, %rs2, 0; +; CHECK-NEXT: ld.param.b32 %r1, [tcgen05_mma_sp_f8f6f4_param_0]; +; CHECK-NEXT: ld.param.b64 %rd1, [tcgen05_mma_sp_f8f6f4_param_2]; +; CHECK-NEXT: ld.param.b64 %rd2,
[tcgen05_mma_sp_f8f6f4_param_3]; +; CHECK-NEXT: ld.param.b32 %r2, [tcgen05_mma_sp_f8f6f4_param_4]; +; CHECK-NEXT: ld.param.b32 %r3, [tcgen05_mma_sp_f8f6f4_param_6]; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::1.kind::f8f6f4.collector::a::discard [%r1], %rd1, %rd2, [%r3], %r2, %p1; +; CHECK-NEXT: ld.param.b32 %r4, [tcgen05_mma_sp_f8f6f4_param_1]; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::2.kind::f8f6f4.collector::a::discard [%r1], %rd1, %rd2, [%r3], %r2, %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::1.kind::f8f6f4.collector::a::discard [%r1], [%r4], %rd2, [%r3], %r2, %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::2.kind::f8f6f4.collector::a::discard [%r1], [%r4], %rd2, [%r3], %r2, %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::1.kind::f8f6f4.collector::a::discard.ashift [%r1], [%r4], %rd2, [%r3], %r2, %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::1.kind::f8f6f4.collector::a::lastuse [%r1], %rd1, %rd2, [%r3], %r2, %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::2.kind::f8f6f4.collector::a::discard.ashift [%r1], [%r4], %rd2, [%r3], %r2, %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::2.kind::f8f6f4.collector::a::lastuse [%r1], %rd1, %rd2, [%r3], %r2, %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::1.kind::f8f6f4.collector::a::lastuse [%r1], [%r4], %rd2, [%r3], %r2, %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::2.kind::f8f6f4.collector::a::lastuse [%r1], [%r4], %rd2, [%r3], %r2, %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::1.kind::f8f6f4.collector::a::fill [%r1], %rd1, %rd2, [%r3], %r2, %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::2.kind::f8f6f4.collector::a::fill [%r1], %rd1, %rd2, [%r3], %r2, %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::1.kind::f8f6f4.collector::a::fill [%r1], [%r4], %rd2, [%r3], %r2, %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::2.kind::f8f6f4.collector::a::fill [%r1], [%r4], %rd2, [%r3], %r2, %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::1.kind::f8f6f4.collector::a::use [%r1], %rd1, %rd2, [%r3], %r2, %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::2.kind::f8f6f4.collector::a::use [%r1], %rd1, %rd2, [%r3], %r2, %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::1.kind::f8f6f4.collector::a::use [%r1], [%r4], %rd2, [%r3], %r2, %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::2.kind::f8f6f4.collector::a::use [%r1], [%r4], %rd2, [%r3], %r2, %p1; +; CHECK-NEXT: ret; + call void @llvm.nvvm.tcgen05.mma.sp.shared(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, i32 2, i32 1, i32 0) + + call void @llvm.nvvm.tcgen05.mma.sp.shared(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, i32 2, i32 2, i32 0) + + call void @llvm.nvvm.tcgen05.mma.sp.tensor(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, i32 2, i32 1, i32 0) + + call void @llvm.nvvm.tcgen05.mma.sp.tensor(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, i32 2, i32 2, i32 0) + + call void @llvm.nvvm.tcgen05.mma.sp.tensor.ashift(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, i32 2, i32 1, i32 0) + + call void @llvm.nvvm.tcgen05.mma.sp.shared(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, i32 2, i32 1, i32 1) + + call void @llvm.nvvm.tcgen05.mma.sp.tensor.ashift(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6)
%spmetadata, i32 2, i32 2, i32 0) + + call void @llvm.nvvm.tcgen05.mma.sp.shared(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, i32 2, i32 2, i32 1) + + call void @llvm.nvvm.tcgen05.mma.sp.tensor(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, i32 2, i32 1, i32 1) + + call void @llvm.nvvm.tcgen05.mma.sp.tensor(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, i32 2, i32 2, i32 1) + + call void @llvm.nvvm.tcgen05.mma.sp.shared(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, i32 2, i32 1, i32 2) + + call void @llvm.nvvm.tcgen05.mma.sp.shared(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, i32 2, i32 2, i32 2) + + call void @llvm.nvvm.tcgen05.mma.sp.tensor(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, i32 2, i32 1, i32 2) + + call void @llvm.nvvm.tcgen05.mma.sp.tensor(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, i32 2, i32 2, i32 2) + + call void @llvm.nvvm.tcgen05.mma.sp.shared(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, i32 2, i32 1, i32 3) + + call void @llvm.nvvm.tcgen05.mma.sp.shared(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, i32 2, i32 2, i32 3) + + call void @llvm.nvvm.tcgen05.mma.sp.tensor(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, i32 2, i32 1, i32 3) + + call void @llvm.nvvm.tcgen05.mma.sp.tensor(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, i32 2, i32 2, i32 3) + ret void +} + +define void @tcgen05_mma_i8(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d) { +; CHECK-LABEL: tcgen05_mma_i8( +; CHECK: { +; CHECK-NEXT: .reg .pred %p<2>; +; CHECK-NEXT: .reg .b16 %rs<3>; +; CHECK-NEXT: .reg .b32 %r<4>; +; CHECK-NEXT: .reg .b64 %rd<3>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.b8 %rs1, [tcgen05_mma_i8_param_5]; +; CHECK-NEXT: and.b16 %rs2, %rs1, 1; +; CHECK-NEXT: setp.ne.b16 %p1, %rs2, 0; +; CHECK-NEXT: ld.param.b32 %r1, [tcgen05_mma_i8_param_0]; +; CHECK-NEXT: ld.param.b64 %rd1, [tcgen05_mma_i8_param_2]; +; CHECK-NEXT: ld.param.b64 %rd2, [tcgen05_mma_i8_param_3]; +; CHECK-NEXT: ld.param.b32 %r2, [tcgen05_mma_i8_param_4]; +; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::i8.collector::a::discard [%r1], %rd1, %rd2, %r2, %p1; +; CHECK-NEXT: ld.param.b32 %r3, [tcgen05_mma_i8_param_1]; +; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::i8.collector::a::discard [%r1], %rd1, %rd2, %r2, %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::i8.collector::a::discard [%r1], [%r3], %rd2, %r2, %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::i8.collector::a::discard [%r1], [%r3], %rd2, %r2, %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::i8.collector::a::discard.ashift [%r1], [%r3], %rd2, %r2, %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::i8.collector::a::lastuse [%r1], %rd1, %rd2, %r2, %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::i8.collector::a::discard.ashift [%r1], [%r3], %rd2, %r2, 
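+; Dense i8 MMA coverage: the kind flag (i32 3) selects kind::i8, the next
+; flag selects cta_group::1 or ::2, and the final flag walks the four
+; collector usages (0=discard, 1=lastuse, 2=fill, 3=use), for both the
+; shared-memory-descriptor and tensor-memory forms of the A operand,
+; including the .ashift variants.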
+define void @tcgen05_mma_i8(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d) {
+; CHECK-LABEL: tcgen05_mma_i8(
+; CHECK: {
+; CHECK-NEXT: .reg .pred %p<2>;
+; CHECK-NEXT: .reg .b16 %rs<3>;
+; CHECK-NEXT: .reg .b32 %r<4>;
+; CHECK-NEXT: .reg .b64 %rd<3>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: ld.param.b8 %rs1, [tcgen05_mma_i8_param_5];
+; CHECK-NEXT: and.b16 %rs2, %rs1, 1;
+; CHECK-NEXT: setp.ne.b16 %p1, %rs2, 0;
+; CHECK-NEXT: ld.param.b32 %r1, [tcgen05_mma_i8_param_0];
+; CHECK-NEXT: ld.param.b64 %rd1, [tcgen05_mma_i8_param_2];
+; CHECK-NEXT: ld.param.b64 %rd2, [tcgen05_mma_i8_param_3];
+; CHECK-NEXT: ld.param.b32 %r2, [tcgen05_mma_i8_param_4];
+; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::i8.collector::a::discard [%r1], %rd1, %rd2, %r2, %p1;
+; CHECK-NEXT: ld.param.b32 %r3, [tcgen05_mma_i8_param_1];
+; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::i8.collector::a::discard [%r1], %rd1, %rd2, %r2, %p1;
+; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::i8.collector::a::discard [%r1], [%r3], %rd2, %r2, %p1;
+; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::i8.collector::a::discard [%r1], [%r3], %rd2, %r2, %p1;
+; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::i8.collector::a::discard.ashift [%r1], [%r3], %rd2, %r2, %p1;
+; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::i8.collector::a::lastuse [%r1], %rd1, %rd2, %r2, %p1;
+; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::i8.collector::a::discard.ashift [%r1], [%r3], %rd2, %r2, %p1;
+; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::i8.collector::a::lastuse [%r1], %rd1, %rd2, %r2, %p1;
+; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::i8.collector::a::lastuse [%r1], [%r3], %rd2, %r2, %p1;
+; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::i8.collector::a::lastuse [%r1], [%r3], %rd2, %r2, %p1;
+; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::i8.collector::a::fill [%r1], %rd1, %rd2, %r2, %p1;
+; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::i8.collector::a::fill [%r1], %rd1, %rd2, %r2, %p1;
+; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::i8.collector::a::fill [%r1], [%r3], %rd2, %r2, %p1;
+; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::i8.collector::a::fill [%r1], [%r3], %rd2, %r2, %p1;
+; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::i8.collector::a::use [%r1], %rd1, %rd2, %r2, %p1;
+; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::i8.collector::a::use [%r1], %rd1, %rd2, %r2, %p1;
+; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::i8.collector::a::use [%r1], [%r3], %rd2, %r2, %p1;
+; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::i8.collector::a::use [%r1], [%r3], %rd2, %r2, %p1;
+; CHECK-NEXT: ret;
+  call void @llvm.nvvm.tcgen05.mma.shared(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 3, i32 1, i32 0)
+
+  call void @llvm.nvvm.tcgen05.mma.shared(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 3, i32 2, i32 0)
+
+  call void @llvm.nvvm.tcgen05.mma.tensor(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 3, i32 1, i32 0)
+
+  call void @llvm.nvvm.tcgen05.mma.tensor(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 3, i32 2, i32 0)
+
+  call void @llvm.nvvm.tcgen05.mma.tensor.ashift(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 3, i32 1, i32 0)
+
+  call void @llvm.nvvm.tcgen05.mma.shared(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 3, i32 1, i32 1)
+
+  call void @llvm.nvvm.tcgen05.mma.tensor.ashift(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 3, i32 2, i32 0)
+
+  call void @llvm.nvvm.tcgen05.mma.shared(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 3, i32 2, i32 1)
+
+  call void @llvm.nvvm.tcgen05.mma.tensor(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 3, i32 1, i32 1)
+
+  call void @llvm.nvvm.tcgen05.mma.tensor(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 3, i32 2, i32 1)
+
+  call void @llvm.nvvm.tcgen05.mma.shared(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 3, i32 1, i32 2)
+
+  call void @llvm.nvvm.tcgen05.mma.shared(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 3, i32 2, i32 2)
+
+  call void @llvm.nvvm.tcgen05.mma.tensor(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 3, i32 1, i32 2)
+
+  call void @llvm.nvvm.tcgen05.mma.tensor(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 3, i32 2, i32 2)
+
+  call void @llvm.nvvm.tcgen05.mma.shared(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 3, i32 1, i32 3)
+
+  call void @llvm.nvvm.tcgen05.mma.shared(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 3, i32 2, i32 3)
+
+  call void @llvm.nvvm.tcgen05.mma.tensor(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 3, i32 1, i32 3)
+
+  call void @llvm.nvvm.tcgen05.mma.tensor(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 3, i32 2, i32 3)
+  ret void
+}
+
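+; Sparse i8 MMA coverage: the same cta_group x collector-usage cross-product
+; as the dense test above, with the additional %spmetadata tensor-memory
+; pointer that lowers to the [%r3] sparsity-metadata operand.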
+define void @tcgen05_mma_sp_i8(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata) {
+; CHECK-LABEL: tcgen05_mma_sp_i8(
+; CHECK: {
+; CHECK-NEXT: .reg .pred %p<2>;
+; CHECK-NEXT: .reg .b16 %rs<3>;
+; CHECK-NEXT: .reg .b32 %r<5>;
+; CHECK-NEXT: .reg .b64 %rd<3>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: ld.param.b8 %rs1, [tcgen05_mma_sp_i8_param_5];
+; CHECK-NEXT: and.b16 %rs2, %rs1, 1;
+; CHECK-NEXT: setp.ne.b16 %p1, %rs2, 0;
+; CHECK-NEXT: ld.param.b32 %r1, [tcgen05_mma_sp_i8_param_0];
+; CHECK-NEXT: ld.param.b64 %rd1, [tcgen05_mma_sp_i8_param_2];
+; CHECK-NEXT: ld.param.b64 %rd2, [tcgen05_mma_sp_i8_param_3];
+; CHECK-NEXT: ld.param.b32 %r2, [tcgen05_mma_sp_i8_param_4];
+; CHECK-NEXT: ld.param.b32 %r3, [tcgen05_mma_sp_i8_param_6];
+; CHECK-NEXT: tcgen05.mma.sp.cta_group::1.kind::i8.collector::a::discard [%r1], %rd1, %rd2, [%r3], %r2, %p1;
+; CHECK-NEXT: ld.param.b32 %r4, [tcgen05_mma_sp_i8_param_1];
+; CHECK-NEXT: tcgen05.mma.sp.cta_group::2.kind::i8.collector::a::discard [%r1], %rd1, %rd2, [%r3], %r2, %p1;
+; CHECK-NEXT: tcgen05.mma.sp.cta_group::1.kind::i8.collector::a::discard [%r1], [%r4], %rd2, [%r3], %r2, %p1;
+; CHECK-NEXT: tcgen05.mma.sp.cta_group::2.kind::i8.collector::a::discard [%r1], [%r4], %rd2, [%r3], %r2, %p1;
+; CHECK-NEXT: tcgen05.mma.sp.cta_group::1.kind::i8.collector::a::discard.ashift [%r1], [%r4], %rd2, [%r3], %r2, %p1;
+; CHECK-NEXT: tcgen05.mma.sp.cta_group::1.kind::i8.collector::a::lastuse [%r1], %rd1, %rd2, [%r3], %r2, %p1;
+; CHECK-NEXT: tcgen05.mma.sp.cta_group::2.kind::i8.collector::a::discard.ashift [%r1], [%r4], %rd2, [%r3], %r2, %p1;
+; CHECK-NEXT: tcgen05.mma.sp.cta_group::2.kind::i8.collector::a::lastuse [%r1], %rd1, %rd2, [%r3], %r2, %p1;
+; CHECK-NEXT: tcgen05.mma.sp.cta_group::1.kind::i8.collector::a::lastuse [%r1], [%r4], %rd2, [%r3], %r2, %p1;
+; CHECK-NEXT: tcgen05.mma.sp.cta_group::2.kind::i8.collector::a::lastuse [%r1], [%r4], %rd2, [%r3], %r2, %p1;
+; CHECK-NEXT: tcgen05.mma.sp.cta_group::1.kind::i8.collector::a::fill [%r1], %rd1, %rd2, [%r3], %r2, %p1;
+; CHECK-NEXT: tcgen05.mma.sp.cta_group::2.kind::i8.collector::a::fill [%r1], %rd1, %rd2, [%r3], %r2, %p1;
+; CHECK-NEXT: tcgen05.mma.sp.cta_group::1.kind::i8.collector::a::fill [%r1], [%r4], %rd2, [%r3], %r2, %p1;
+; CHECK-NEXT: tcgen05.mma.sp.cta_group::2.kind::i8.collector::a::fill [%r1], [%r4], %rd2, [%r3], %r2, %p1;
+; CHECK-NEXT: tcgen05.mma.sp.cta_group::1.kind::i8.collector::a::use [%r1], %rd1, %rd2, [%r3], %r2, %p1;
+; CHECK-NEXT: tcgen05.mma.sp.cta_group::2.kind::i8.collector::a::use [%r1], %rd1, %rd2, [%r3], %r2, %p1;
+; CHECK-NEXT: tcgen05.mma.sp.cta_group::1.kind::i8.collector::a::use [%r1], [%r4], %rd2, [%r3], %r2, %p1;
+; CHECK-NEXT: tcgen05.mma.sp.cta_group::2.kind::i8.collector::a::use [%r1], [%r4], %rd2, [%r3], %r2, %p1;
+; CHECK-NEXT: ret;
+  call void @llvm.nvvm.tcgen05.mma.sp.shared(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, i32 3, i32 1, i32 0)
+
+  call void @llvm.nvvm.tcgen05.mma.sp.shared(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, i32 3, i32 2, i32 0)
+
+  call void @llvm.nvvm.tcgen05.mma.sp.tensor(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, i32 3, i32 1, i32 0)
+
+  call void @llvm.nvvm.tcgen05.mma.sp.tensor(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, i32 3, i32 2, i32 0)
+
+  call void @llvm.nvvm.tcgen05.mma.sp.tensor.ashift(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, i32 3, i32 1, i32 0)
+
+  call void @llvm.nvvm.tcgen05.mma.sp.shared(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, i32 3, i32 1, i32 1)
+
+  call void @llvm.nvvm.tcgen05.mma.sp.tensor.ashift(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, i32 3, i32 2, i32 0)
+
+  call void @llvm.nvvm.tcgen05.mma.sp.shared(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, i32 3, i32 2, i32 1)
+
+  call void @llvm.nvvm.tcgen05.mma.sp.tensor(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, i32 3, i32 1, i32 1)
+
+  call void @llvm.nvvm.tcgen05.mma.sp.tensor(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, i32 3, i32 2, i32 1)
+
+  call void @llvm.nvvm.tcgen05.mma.sp.shared(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, i32 3, i32 1, i32 2)
+
+  call void @llvm.nvvm.tcgen05.mma.sp.shared(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, i32 3, i32 2, i32 2)
+
+  call void @llvm.nvvm.tcgen05.mma.sp.tensor(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, i32 3, i32 1, i32 2)
+
+  call void @llvm.nvvm.tcgen05.mma.sp.tensor(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, i32 3, i32 2, i32 2)
+
+  call void @llvm.nvvm.tcgen05.mma.sp.shared(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, i32 3, i32 1, i32 3)
+
+  call void @llvm.nvvm.tcgen05.mma.sp.shared(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, i32 3, i32 2, i32 3)
+
+  call void @llvm.nvvm.tcgen05.mma.sp.tensor(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, i32 3, i32 1, i32 3)
+
+  call void @llvm.nvvm.tcgen05.mma.sp.tensor(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, i32 3, i32 2, i32 3)
+  ret void
+}