Conv:TF32: add more instances - 2 #2879
Open
yingluAMD wants to merge 20 commits into develop from tf32_instance_0919
+1,940 −151
Commits (20, all by yingluAMD; changes shown from 16 commits):
58e82b8 conv:tf32:add more instances
45d0057 add instances of device_grouped_conv_fwd_xdl_f32_comp_instances
823ee07 add instances of device_grouped_conv_fwd_xdl_f32_tf32_mem_instances
255a25d add instances of device_grouped_conv_fwd_xdl_large_tensor_f32_tf32_in…
58a3fa1 review
7f6962e tf32:conv:add instances for base class DeviceConvFwd
ddfc65d tf32:conv:add instances for base class DeviceGroupedConvBwdDataMultipleD
de9a550 tf32:conv:add instances for base class DeviceGroupedConvBwdWeight
b3db6c1 self review
b3bb54f add tf32 in profiler
623a991 Merge branch 'develop' into tf32_instance_0919
7a653cd remove useless instances
040aee6 remove gnhwc/ngchw/ngcdhw instances
a8d9fbe remove useless bwd instances
94da54b change check_err for tf32 (see the tolerance sketch after this list)
f54bab1 fix clang-format fail
374e6bb remove non-ndhwgc/nhwgc/nhwc instances
6f66571 complement ndhwgc instances
1be39fc update copyright datetime
a1b65ec add check in IsSupportedArgument()
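Commit 94da54b loosens the reference check for TF32 results. The PR's actual change is not shown in this excerpt; the following standalone sketch illustrates one common way such a tolerance can be derived, assuming TF32's 10 explicit mantissa bits and error growth with the accumulation length K. The helper names are hypothetical and do not reproduce composable_kernel's real check_err signature.

#include <algorithm>
#include <cmath>
#include <cstddef>
#include <vector>

// Hypothetical relative tolerance for TF32: one rounding step can lose
// up to ~2^-10 of relative precision (10 explicit mantissa bits), and
// rounding error over a K-long dot product grows roughly like sqrt(K).
inline double tf32_rel_tolerance(std::size_t k)
{
    return std::pow(2.0, -10) * std::sqrt(static_cast<double>(k));
}

// Illustrative element-wise check against an FP32 reference using the
// relaxed threshold above.
inline bool check_err_tf32_sketch(const std::vector<float>& out,
                                  const std::vector<float>& ref,
                                  std::size_t k)
{
    const double tol = tf32_rel_tolerance(k);
    for(std::size_t i = 0; i < out.size(); ++i)
    {
        const double denom =
            std::max(std::fabs(static_cast<double>(ref[i])), 1.0);
        if(std::fabs(static_cast<double>(out[i]) - ref[i]) / denom > tol)
        {
            return false;
        }
    }
    return true;
}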
example/17_convnd_bwd_data/convnd_bwd_data_xdl_fp32.cpp (207 additions, 0 deletions)
// SPDX-License-Identifier: MIT
// Copyright (c) 2018-2023, Advanced Micro Devices, Inc. All rights reserved.

#include "convnd_bwd_data_common.hpp"

#include "ck/tensor_operation/gpu/device/impl/device_convnd_bwd_data_nwc_kxc_nwk_xdl.hpp"

using InDataType  = float;
using WeiDataType = float;
using OutDataType = float;
using AccDataType = float;

template <ck::index_t... Is>
using S = ck::Sequence<Is...>;

using InElementOp  = ck::tensor_operation::element_wise::PassThrough;
using WeiElementOp = ck::tensor_operation::element_wise::PassThrough;
using OutElementOp = ck::tensor_operation::element_wise::PassThrough;

static constexpr auto ConvBwdDefault =
    ck::tensor_operation::device::ConvolutionBackwardDataSpecialization::Default;

template <ck::index_t NDimSpatial>
using DeviceConvNdBwdDataInstance = ck::tensor_operation::device::DeviceConvNdBwdDataNwcKxcNwk_Xdl<
    NDimSpatial,    // NDimSpatial
    InDataType,     // InDataType
    WeiDataType,    // WeiDataType
    OutDataType,    // OutDataType
    AccDataType,    // AccDataType
    InElementOp,    // InElementwiseOperation
    WeiElementOp,   // WeiElementwiseOperation
    OutElementOp,   // OutElementwiseOperation
    ConvBwdDefault, // ConvolutionBackwardDataSpecialization
    256,            // BlockSize
    128,            // MPerBlock
    128,            // NPerBlock
    4,              // K0PerBlock
    8,              // K1
    32,             // MPerXdl
    32,             // NPerXdl
    2,              // MXdlPerWave
    2,              // NXdlPerWave
    S<4, 64, 1>,    // ABlockTransferThreadClusterLengths_K0_M_K1
    S<1, 0, 2>,     // ABlockTransferThreadClusterArrangeOrder
    S<1, 0, 2>,     // ABlockTransferSrcAccessOrder
    2,              // ABlockTransferSrcVectorDim
    8,              // ABlockTransferSrcScalarPerVector
    8,              // ABlockTransferDstScalarPerVector_K1
    true,           // ABlockLdsAddExtraM
    S<4, 64, 1>,    // BBlockTransferThreadClusterLengths_K0_N_K1
    S<2, 0, 1>,     // BBlockTransferThreadClusterArrangeOrder
    S<0, 2, 1>,     // BBlockTransferSrcAccessOrder
    1,              // BBlockTransferSrcVectorDim
    2,              // BBlockTransferSrcScalarPerVector
    8,              // BBlockTransferDstScalarPerVector_K1
    true,           // BBlockLdsAddExtraN
    7,              // CThreadTransferSrcDstVectorDim
    1>;             // GemmCThreadTransferDstScalarPerVector
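// Tile bookkeeping for the instance above, derived from its template
// arguments: a 256-thread block holds 4 waves of 64 threads arranged 2 x 2,
// and each wave computes MXdlPerWave * MPerXdl = 2 * 32 = 64 rows by
// NXdlPerWave * NPerXdl = 2 * 32 = 64 columns, giving the 128 x 128
// (MPerBlock x NPerBlock) output tile. Each main-loop iteration consumes
// K0PerBlock * K1 = 4 * 8 = 32 elements of the reduction dimension.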

int main(int argc, char* argv[])
{
    namespace ctc = ck::tensor_layout::convolution;

    print_helper_msg();

    bool do_verification = true;
    int init_method      = 1;
    bool time_kernel     = false;

    // Default problem: 2D conv, G=1, N=128, K=256, C=256, 3x3 filter,
    // 71x71 input, stride 2, dilation 1, pads 1/1.
    ck::utils::conv::ConvParam conv_param{
        2, 1, 128, 256, 256, {3, 3}, {71, 71}, {2, 2}, {1, 1}, {1, 1}, {1, 1}};

    if(argc == 1)
    {
        // use default
    }
    else if(argc == 4)
    {
        // argv[1]: verification, argv[2]: init method, argv[3]: time kernel
        do_verification = std::stoi(argv[1]);
        init_method     = std::stoi(argv[2]);
        time_kernel     = std::stoi(argv[3]);
    }
    else
    {
        do_verification = std::stoi(argv[1]);
        init_method     = std::stoi(argv[2]);
        time_kernel     = std::stoi(argv[3]);
        const ck::index_t num_dim_spatial = std::stoi(argv[4]);

        conv_param = ck::utils::conv::parse_conv_param(num_dim_spatial, 5, argv);
    }

    const auto in_element_op  = InElementOp{};
    const auto wei_element_op = WeiElementOp{};
    const auto out_element_op = OutElementOp{};

    if(conv_param.num_dim_spatial_ == 1)
    {
        using InLayout  = ctc::GNWC;
        using WeiLayout = ctc::GKXC;
        using OutLayout = ctc::GNWK;

        const auto in_g_n_c_wis_desc =
            ck::utils::conv::make_input_host_tensor_descriptor_g_n_c_wis_packed<InLayout>(
                conv_param);

        const auto wei_g_k_c_xs_desc =
            ck::utils::conv::make_weight_host_tensor_descriptor_g_k_c_xs_packed<WeiLayout>(
                conv_param);

        const auto out_g_n_k_wos_desc =
            ck::utils::conv::make_output_host_tensor_descriptor_g_n_k_wos_packed<OutLayout>(
                conv_param);

        return run_conv_bwd_data<1,
                                 InDataType,
                                 WeiDataType,
                                 OutDataType,
                                 InElementOp,
                                 WeiElementOp,
                                 OutElementOp,
                                 DeviceConvNdBwdDataInstance<1>>(do_verification,
                                                                 init_method,
                                                                 time_kernel,
                                                                 conv_param,
                                                                 in_g_n_c_wis_desc,
                                                                 wei_g_k_c_xs_desc,
                                                                 out_g_n_k_wos_desc,
                                                                 in_element_op,
                                                                 wei_element_op,
                                                                 out_element_op);
    }
    else if(conv_param.num_dim_spatial_ == 2)
    {
        using InLayout  = ctc::GNHWC;
        using WeiLayout = ctc::GKYXC;
        using OutLayout = ctc::GNHWK;

        const auto in_g_n_c_wis_desc =
            ck::utils::conv::make_input_host_tensor_descriptor_g_n_c_wis_packed<InLayout>(
                conv_param);

        const auto wei_g_k_c_xs_desc =
            ck::utils::conv::make_weight_host_tensor_descriptor_g_k_c_xs_packed<WeiLayout>(
                conv_param);

        const auto out_g_n_k_wos_desc =
            ck::utils::conv::make_output_host_tensor_descriptor_g_n_k_wos_packed<OutLayout>(
                conv_param);

        return run_conv_bwd_data<2,
                                 InDataType,
                                 WeiDataType,
                                 OutDataType,
                                 InElementOp,
                                 WeiElementOp,
                                 OutElementOp,
                                 DeviceConvNdBwdDataInstance<2>>(do_verification,
                                                                 init_method,
                                                                 time_kernel,
                                                                 conv_param,
                                                                 in_g_n_c_wis_desc,
                                                                 wei_g_k_c_xs_desc,
                                                                 out_g_n_k_wos_desc,
                                                                 in_element_op,
                                                                 wei_element_op,
                                                                 out_element_op);
    }
    else if(conv_param.num_dim_spatial_ == 3)
    {
        using InLayout  = ctc::GNDHWC;
        using WeiLayout = ctc::GKZYXC;
        using OutLayout = ctc::GNDHWK;

        const auto in_g_n_c_wis_desc =
            ck::utils::conv::make_input_host_tensor_descriptor_g_n_c_wis_packed<InLayout>(
                conv_param);

        const auto wei_g_k_c_xs_desc =
            ck::utils::conv::make_weight_host_tensor_descriptor_g_k_c_xs_packed<WeiLayout>(
                conv_param);

        const auto out_g_n_k_wos_desc =
            ck::utils::conv::make_output_host_tensor_descriptor_g_n_k_wos_packed<OutLayout>(
                conv_param);

        return run_conv_bwd_data<3,
                                 InDataType,
                                 WeiDataType,
                                 OutDataType,
                                 InElementOp,
                                 WeiElementOp,
                                 OutElementOp,
                                 DeviceConvNdBwdDataInstance<3>>(do_verification,
                                                                 init_method,
                                                                 time_kernel,
                                                                 conv_param,
                                                                 in_g_n_c_wis_desc,
                                                                 wei_g_k_c_xs_desc,
                                                                 out_g_n_k_wos_desc,
                                                                 in_element_op,
                                                                 wei_element_op,
                                                                 out_element_op);
    }

    return 0;
}
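The example above computes and verifies in plain FP32. The TF32 instances this PR adds elsewhere keep FP32 storage but compute with a 10-bit mantissa, which is why the relaxed tolerance in commit 94da54b matters. A minimal host-side sketch of the precision loss (truncation is used for simplicity; hardware may round to nearest, and this helper is illustrative, not part of the PR):

#include <cstdint>
#include <cstdio>
#include <cstring>

// Emulate TF32 truncation of an IEEE binary32 value on the host: TF32
// keeps the sign bit, all 8 exponent bits, and the top 10 mantissa bits,
// so clearing the low 13 mantissa bits models the precision loss.
float to_tf32(float x)
{
    std::uint32_t bits;
    std::memcpy(&bits, &x, sizeof(bits));
    bits &= 0xFFFFE000u; // zero the 13 least-significant mantissa bits
    float y;
    std::memcpy(&y, &bits, sizeof(y));
    return y;
}

int main()
{
    const float x = 1.2345678f;
    std::printf("fp32 input : %.9f\n", x);
    std::printf("tf32 stored: %.9f\n", to_tf32(x));
    return 0;
}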
Review comment on this file: this file will be removed.