From 18c949ec13ea28fd8877d050d0812f50d35cd203 Mon Sep 17 00:00:00 2001
From: zjing14
Date: Tue, 10 Sep 2019 19:44:51 -0500
Subject: [PATCH] Disabled implicitGemm FP16 due to NaN issue (#2072)

* disable_implicitgemm_fp16(1): disable fp16 of implicitGemm

* disable_implicitgemm_fp16(2): add issue number
---
 src/solver/conv_hip_implicit_gemm_v4_fwd.cpp | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/src/solver/conv_hip_implicit_gemm_v4_fwd.cpp b/src/solver/conv_hip_implicit_gemm_v4_fwd.cpp
index 2ed1011ad3..b0fbe47b32 100644
--- a/src/solver/conv_hip_implicit_gemm_v4_fwd.cpp
+++ b/src/solver/conv_hip_implicit_gemm_v4_fwd.cpp
@@ -34,7 +34,9 @@ namespace solver {
 
 bool ConvHipImplicitGemmV4Fwd::IsApplicable(const ConvolutionContext& ctx) const
 {
-    bool isTypeSupported = (ctx.IsFp32() || ctx.IsFp16());
+    // disable Fp16 due to a NaN issue (issue #2071)
+    ///\todo 1) fix the NaN issue in Fp16, 2) re-enable Fp16, and 3) add Fp16 tests
+    bool isTypeSupported = ctx.IsFp32();
 
     // For fp16, when c*x*y % 64 == 0, 4 channels are accumulated through dot4 (2 * dot2) operation
     // when c*x*y % 32 == 0, channels are accumulated through dot2 operation.
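
For reference, the fp16 accumulation constraint described in the comment above can be read as a minimal standalone sketch. This is not MIOpen source: the function name Fp16AccumulationSupported is a hypothetical helper, and the explicit c*x*y divisibility test is an illustrative assumption based solely on that comment.

// Minimal sketch (hypothetical, not MIOpen code): for fp16, the implicit-GEMM
// kernel accumulates input channels with packed dot products, so c*x*y must be
// divisible by the width of an available dot instruction.
#include <cstdint>

bool Fp16AccumulationSupported(std::int64_t c, std::int64_t x, std::int64_t y)
{
    const std::int64_t cxy = c * x * y;
    if(cxy % 64 == 0)
        return true; // 4 channels per step via dot4 (2 * dot2)
    if(cxy % 32 == 0)
        return true; // 2 channels per step via dot2
    return false;    // fp16 path not applicable for this shape
}

Under this reading, the patch simply removes ctx.IsFp16() from the type gate so that no fp16 configuration, even one satisfying the divisibility constraint, reaches the kernel until the NaN issue (#2071) is resolved.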