From: Xinhao Zhang <zhangxinhao1@huawei.com>
To: qemu-devel@nongnu.org
Subject: [PATCH] fpu/softfloat.c: add spaces around operator
Date: Sat, 14 Nov 2020 21:07:15 +0800
Message-ID: <20201114130715.1126922-1-zhangxinhao1@huawei.com>
Cc: peter.maydell@linaro.org, qemu-trivial@nongnu.org, alex.chen@huawei.com, alex.bennee@linaro.org, aurelien@aurel32.net, dengkai1@huawei.com

Fix code style. Operators need spaces on both sides.

Signed-off-by: Xinhao Zhang <zhangxinhao1@huawei.com>
Signed-off-by: Kai Deng <dengkai1@huawei.com>
---
 fpu/softfloat.c | 150 ++++++++++++++++++++++++------------------------
 1 file changed, 75 insertions(+), 75 deletions(-)

diff --git a/fpu/softfloat.c b/fpu/softfloat.c
index 67cfa0fd82..9938a20905 100644
--- a/fpu/softfloat.c
+++ b/fpu/softfloat.c
@@ -3786,13 +3786,13 @@ static int32_t roundAndPackInt32(bool zSign, uint64_t absZ,
         abort();
     }
     roundBits = absZ & 0x7F;
-    absZ = ( absZ + roundIncrement )>>7;
+    absZ = ( absZ + roundIncrement ) >> 7;
     if (!(roundBits ^ 0x40) && roundNearestEven) {
         absZ &= ~1;
     }
     z = absZ;
     if ( zSign ) z = - z;
-    if ( ( absZ>>32 ) || ( z && ( ( z < 0 ) ^ zSign ) ) ) {
+    if ( ( absZ >> 32 ) || ( z && ( ( z < 0 ) ^ zSign ) ) ) {
         float_raise(float_flag_invalid, status);
         return zSign ? INT32_MIN : INT32_MAX;
     }
@@ -3938,7 +3938,7 @@ static void
     int8_t shiftCount;
 
     shiftCount = clz32(aSig) - 8;
-    *zSigPtr = aSig<<shiftCount;
+    *zSigPtr = aSig << shiftCount;
[...]
         status->float_exception_flags |= float_flag_inexact;
     }
-    zSig = ( zSig + roundIncrement )>>7;
+    zSig = ( zSig + roundIncrement ) >> 7;
     if (!(roundBits ^ 0x40) && roundNearestEven) {
         zSig &= ~1;
     }
@@ -4058,7 +4058,7 @@ static float32
     int8_t shiftCount;
 
     shiftCount = clz32(zSig) - 1;
-    return roundAndPackFloat32(zSign, zExp - shiftCount, zSig<<shiftCount,
+    return roundAndPackFloat32(zSign, zExp - shiftCount, zSig << shiftCount,
[...]
         status->float_exception_flags |= float_flag_inexact;
     }
-    zSig = ( zSig + roundIncrement )>>10;
+    zSig = ( zSig + roundIncrement ) >> 10;
     if (!(roundBits ^ 0x200) && roundNearestEven) {
         zSig &= ~1;
     }
@@ -4214,7 +4214,7 @@ static float64
     int8_t shiftCount;
 
     shiftCount = clz64(zSig) - 1;
-    return roundAndPackFloat64(zSign, zExp - shiftCount, zSig<<shiftCount,
+    return roundAndPackFloat64(zSign, zExp - shiftCount, zSig << shiftCount,
[...]
-    return ( a.high>>48 ) & 0x7FFF;
+    return ( a.high >> 48 ) & 0x7FFF;
 
 }
 
@@ -4546,11 +4546,11 @@ static void
     if ( aSig0 == 0 ) {
         shiftCount = clz64(aSig1) - 15;
         if ( shiftCount < 0 ) {
-            *zSig0Ptr = aSig1>>( - shiftCount );
-            *zSig1Ptr = aSig1<<( shiftCount & 63 );
+            *zSig0Ptr = aSig1 >> ( - shiftCount );
+            *zSig1Ptr = aSig1 << ( shiftCount & 63 );
         }
         else {
-            *zSig0Ptr = aSig1<<shiftCount;
+            *zSig0Ptr = aSig1 << shiftCount;
[...]
             q >>= 32 - expDiff;
             bSig >>= 2;
-            aSig = ( ( aSig>>1 )<<( expDiff - 1 ) ) - bSig * q;
+            aSig = ( ( aSig >> 1 ) << ( expDiff - 1 ) ) - bSig * q;
         }
         else {
             aSig >>= 2;
@@ -5009,21 +5009,21 @@ float32 float32_rem(float32 a, float32 b, float_status *status)
     }
     else {
         if ( bSig <= aSig ) aSig -= bSig;
-        aSig64 = ( (uint64_t) aSig )<<40;
-        bSig64 = ( (uint64_t) bSig )<<40;
+        aSig64 = ( (uint64_t) aSig ) << 40;
+        bSig64 = ( (uint64_t) bSig ) << 40;
         expDiff -= 64;
         while ( 0 < expDiff ) {
             q64 = estimateDiv128To64( aSig64, 0, bSig64 );
             q64 = ( 2 < q64 ) ? q64 - 2 : 0;
-            aSig64 = - ( ( bSig * q64 )<<38 );
+            aSig64 = - ( ( bSig * q64 ) << 38 );
             expDiff -= 62;
         }
         expDiff += 64;
         q64 = estimateDiv128To64( aSig64, 0, bSig64 );
         q64 = ( 2 < q64 ) ? q64 - 2 : 0;
-        q = q64>>( 64 - expDiff );
+        q = q64 >> ( 64 - expDiff );
         bSig <<= 6;
-        aSig = ( ( aSig64>>33 )<<( expDiff - 1 ) ) - bSig * q;
+        aSig = ( ( aSig64 >> 33 ) << ( expDiff - 1 ) ) - bSig * q;
     }
     do {
         alternateASig = aSig;
@@ -5302,7 +5302,7 @@ float64 float64_rem(float64 a, float64 b, float_status *status)
     while ( 0 < expDiff ) {
         q = estimateDiv128To64( aSig, 0, bSig );
         q = ( 2 < q ) ? q - 2 : 0;
-        aSig = - ( ( bSig>>2 ) * q );
+        aSig = - ( ( bSig >> 2 ) * q );
         expDiff -= 62;
     }
     expDiff += 64;
@@ -5311,7 +5311,7 @@ float64 float64_rem(float64 a, float64 b, float_status *status)
         q = ( 2 < q ) ? q - 2 : 0;
         q >>= 64 - expDiff;
         bSig >>= 2;
-        aSig = ( ( aSig>>1 )<<( expDiff - 1 ) ) - bSig * q;
+        aSig = ( ( aSig >> 1 ) << ( expDiff - 1 ) ) - bSig * q;
     }
     else {
         aSig >>= 2;
@@ -5404,7 +5404,7 @@ int32_t floatx80_to_int32(floatx80 a, float_status *status)
     aSig = extractFloatx80Frac( a );
     aExp = extractFloatx80Exp( a );
     aSign = extractFloatx80Sign( a );
-    if ( ( aExp == 0x7FFF ) && (uint64_t) ( aSig<<1 ) ) aSign = 0;
+    if ( ( aExp == 0x7FFF ) && (uint64_t) ( aSig << 1 ) ) aSign = 0;
     shiftCount = 0x4037 - aExp;
     if ( shiftCount <= 0 ) shiftCount = 1;
     shift64RightJamming( aSig, shiftCount, &aSig );
@@ -5437,7 +5437,7 @@ int32_t floatx80_to_int32_round_to_zero(floatx80 a, float_status *status)
     aExp = extractFloatx80Exp( a );
     aSign = extractFloatx80Sign( a );
     if ( 0x401E < aExp ) {
-        if ( ( aExp == 0x7FFF ) && (uint64_t) ( aSig<<1 ) ) aSign = 0;
+        if ( ( aExp == 0x7FFF ) && (uint64_t) ( aSig << 1 ) ) aSign = 0;
         goto invalid;
     }
     else if ( aExp < 0x3FFF ) {
@@ -5456,7 +5456,7 @@ int32_t floatx80_to_int32_round_to_zero(floatx80 a, float_status *status)
         float_raise(float_flag_invalid, status);
         return aSign ? (int32_t) 0x80000000 : 0x7FFFFFFF;
     }
-    if ( ( aSig<<shiftCount ) != savedASig ) {
+    if ( ( aSig << shiftCount ) != savedASig ) {
         status->float_exception_flags |= float_flag_inexact;
     }
     return z;
@@ -5545,8 +5545,8 @@ int64_t floatx80_to_int64_round_to_zero(floatx80 a, float_status *status)
         }
         return 0;
     }
-    z = aSig>>( - shiftCount );
-    if ( (uint64_t) ( aSig<<( shiftCount & 63 ) ) ) {
+    z = aSig >> ( - shiftCount );
+    if ( (uint64_t) ( aSig << ( shiftCount & 63 ) ) ) {
         status->float_exception_flags |= float_flag_inexact;
     }
     if ( aSign ) z = - z;
@@ -5575,7 +5575,7 @@ float32 floatx80_to_float32(floatx80 a, float_status *status)
     aExp = extractFloatx80Exp( a );
     aSign = extractFloatx80Sign( a );
     if ( aExp == 0x7FFF ) {
-        if ( (uint64_t) ( aSig<<1 ) ) {
+        if ( (uint64_t) ( aSig << 1 ) ) {
             float32 res = commonNaNToFloat32(floatx80ToCommonNaN(a, status),
                                              status);
             return float32_silence_nan(res, status);
@@ -5609,7 +5609,7 @@ float64 floatx80_to_float64(floatx80 a, float_status *status)
     aExp = extractFloatx80Exp( a );
     aSign = extractFloatx80Sign( a );
     if ( aExp == 0x7FFF ) {
-        if ( (uint64_t) ( aSig<<1 ) ) {
+        if ( (uint64_t) ( aSig << 1 ) ) {
             float64 res = commonNaNToFloat64(floatx80ToCommonNaN(a, status),
                                              status);
             return float64_silence_nan(res, status);
@@ -5642,12 +5642,12 @@ float128 floatx80_to_float128(floatx80 a, float_status *status)
     aSig = extractFloatx80Frac( a );
     aExp = extractFloatx80Exp( a );
     aSign = extractFloatx80Sign( a );
-    if ( ( aExp == 0x7FFF ) && (uint64_t) ( aSig<<1 ) ) {
+    if ( ( aExp == 0x7FFF ) && (uint64_t) ( aSig << 1 ) ) {
         float128 res = commonNaNToFloat128(floatx80ToCommonNaN(a, status),
                                            status);
         return float128_silence_nan(res, status);
     }
-    shift128Right( aSig<<1, 0, 16, &zSig0, &zSig1 );
+    shift128Right( aSig << 1, 0, 16, &zSig0, &zSig1 );
     return packFloat128( aSign, aExp, zSig0, zSig1 );
 
 }
@@ -5688,7 +5688,7 @@ floatx80 floatx80_round_to_int(floatx80 a, float_status *status)
     }
     aExp = extractFloatx80Exp( a );
     if ( 0x403E <= aExp ) {
-        if ( ( aExp == 0x7FFF ) && (uint64_t) ( extractFloatx80Frac( a )<<1 ) ) {
+        if ( ( aExp == 0x7FFF ) && (uint64_t) ( extractFloatx80Frac( a ) << 1 ) ) {
             return propagateFloatx80NaN(a, a, status);
         }
         return a;
@@ -5702,7 +5702,7 @@ floatx80 floatx80_round_to_int(floatx80 a, float_status *status)
         aSign = extractFloatx80Sign( a );
         switch (status->float_rounding_mode) {
         case float_round_nearest_even:
-            if ( ( aExp == 0x3FFE ) && (uint64_t) ( extractFloatx80Frac( a )<<1 )
+            if ( ( aExp == 0x3FFE ) && (uint64_t) ( extractFloatx80Frac( a ) << 1 )
                ) {
                 return
                     packFloatx80( aSign, 0x3FFF, UINT64_C(0x8000000000000000));
@@ -5736,7 +5736,7 @@ floatx80 floatx80_round_to_int(floatx80 a, float_status *status)
     z = a;
     switch (status->float_rounding_mode) {
     case float_round_nearest_even:
-        z.low += lastBitMask>>1;
+        z.low += lastBitMask >> 1;
         if ((z.low & roundBitsMask) == 0) {
             z.low &= ~lastBitMask;
         }
@@ -5817,7 +5817,7 @@ static floatx80 addFloatx80Sigs(floatx80 a, floatx80 b, bool zSign,
     }
     else {
         if ( aExp == 0x7FFF ) {
-            if ( (uint64_t) ( ( aSig | bSig )<<1 ) ) {
+            if ( (uint64_t) ( ( aSig | bSig ) << 1 ) ) {
                 return propagateFloatx80NaN(a, b, status);
             }
             return a;
@@ -5874,7 +5874,7 @@ static floatx80 subFloatx80Sigs(floatx80 a, floatx80 b, bool zSign,
     if ( 0 < expDiff ) goto aExpBigger;
     if ( expDiff < 0 ) goto bExpBigger;
     if ( aExp == 0x7FFF ) {
-        if ( (uint64_t) ( ( aSig | bSig )<<1 ) ) {
+        if ( (uint64_t) ( ( aSig | bSig ) << 1 ) ) {
             return propagateFloatx80NaN(a, b, status);
         }
         float_raise(float_flag_invalid, status);
@@ -5994,8 +5994,8 @@ floatx80 floatx80_mul(floatx80 a, floatx80 b, float_status *status)
     bSign = extractFloatx80Sign( b );
     zSign = aSign ^ bSign;
     if ( aExp == 0x7FFF ) {
-        if (    (uint64_t) ( aSig<<1 )
-             || ( ( bExp == 0x7FFF ) && (uint64_t) ( bSig<<1 ) ) ) {
+        if (    (uint64_t) ( aSig << 1 )
+             || ( ( bExp == 0x7FFF ) && (uint64_t) ( bSig << 1 ) ) ) {
             return propagateFloatx80NaN(a, b, status);
         }
         if ( ( bExp | bSig ) == 0 ) goto invalid;
@@ -6106,7 +6106,7 @@ floatx80 floatx80_div(floatx80 a, floatx80 b, float_status *status)
             add128( rem0, rem1, 0, bSig, &rem0, &rem1 );
         }
         zSig1 = estimateDiv128To64( rem1, 0, bSig );
-        if ( (uint64_t) ( zSig1<<1 ) <= 8 ) {
+        if ( (uint64_t) ( zSig1 << 1 ) <= 8 ) {
             mul64To128( bSig, zSig1, &term1, &term2 );
             sub128( rem1, 0, term1, term2, &rem1, &rem2 );
             while ( (int64_t) rem1 < 0 ) {
@@ -6147,8 +6147,8 @@ floatx80 floatx80_modrem(floatx80 a, floatx80 b, bool mod, uint64_t *quotient,
     bSig = extractFloatx80Frac( b );
     bExp = extractFloatx80Exp( b );
     if ( aExp == 0x7FFF ) {
-        if (    (uint64_t) ( aSig0<<1 )
-             || ( ( bExp == 0x7FFF ) && (uint64_t) ( bSig<<1 ) ) ) {
+        if (    (uint64_t) ( aSig0 << 1 )
+             || ( ( bExp == 0x7FFF ) && (uint64_t) ( bSig << 1 ) ) ) {
             return propagateFloatx80NaN(a, b, status);
         }
         goto invalid;
@@ -6213,7 +6213,7 @@ floatx80 floatx80_modrem(floatx80 a, floatx80 b, bool mod, uint64_t *quotient,
         q = estimateDiv128To64( aSig0, aSig1, bSig );
         q = ( 2 < q ) ? q - 2 : 0;
         q >>= 64 - expDiff;
-        mul64To128( bSig, q<<( 64 - expDiff ), &term0, &term1 );
+        mul64To128( bSig, q << ( 64 - expDiff ), &term0, &term1 );
         sub128( aSig0, aSig1, term0, term1, &aSig0, &aSig1 );
         shortShift128Left( 0, bSig, 64 - expDiff, &term0, &term1 );
         while ( le128( term0, term1, aSig0, aSig1 ) ) {
@@ -6310,17 +6310,17 @@ floatx80 floatx80_sqrt(floatx80 a, float_status *status)
         if ( aSig0 == 0 ) return packFloatx80( 0, 0, 0 );
         normalizeFloatx80Subnormal( aSig0, &aExp, &aSig0 );
     }
-    zExp = ( ( aExp - 0x3FFF )>>1 ) + 0x3FFF;
-    zSig0 = estimateSqrt32( aExp, aSig0>>32 );
+    zExp = ( ( aExp - 0x3FFF ) >> 1 ) + 0x3FFF;
+    zSig0 = estimateSqrt32( aExp, aSig0 >> 32 );
     shift128Right( aSig0, 0, 2 + ( aExp & 1 ), &aSig0, &aSig1 );
-    zSig0 = estimateDiv128To64( aSig0, aSig1, zSig0<<32 ) + ( zSig0<<30 );
-    doubleZSig0 = zSig0<<1;
+    zSig0 = estimateDiv128To64( aSig0, aSig1, zSig0 << 32 ) + ( zSig0 << 30 );
+    doubleZSig0 = zSig0 << 1;
     mul64To128( zSig0, zSig0, &term0, &term1 );
     sub128( aSig0, aSig1, term0, term1, &rem0, &rem1 );
     while ( (int64_t) rem0 < 0 ) {
         --zSig0;
         doubleZSig0 -= 2;
-        add128( rem0, rem1, zSig0>>63, doubleZSig0 | 1, &rem0, &rem1 );
+        add128( rem0, rem1, zSig0 >> 63, doubleZSig0 | 1, &rem0, &rem1 );
     }
     zSig1 = estimateDiv128To64( rem1, 0, doubleZSig0 );
     if ( ( zSig1 & UINT64_C(0x3FFFFFFFFFFFFFFF) ) <= 5 ) {
@@ -6416,7 +6416,7 @@ int32_t float128_to_int32_round_to_zero(float128 a, float_status *status)
         float_raise(float_flag_invalid, status);
         return aSign ? INT32_MIN : INT32_MAX;
     }
-    if ( ( aSig0<<shiftCount ) != savedASig ) {
+    if ( ( aSig0 << shiftCount ) != savedASig ) {
         status->float_exception_flags |= float_flag_inexact;
     }
     return z;
@@ -6506,8 +6506,8 @@ int64_t float128_to_int64_round_to_zero(float128 a, float_status *status)
             }
             return INT64_MIN;
         }
-        z = ( aSig0<<shiftCount ) | ( aSig1>>( ( - shiftCount ) & 63 ) );
-        if ( (uint64_t) ( aSig1<<shiftCount ) ) {
+        z = ( aSig0 << shiftCount ) | ( aSig1 >> ( ( - shiftCount ) & 63 ) );
+        if ( (uint64_t) ( aSig1 << shiftCount ) ) {
             status->float_exception_flags |= float_flag_inexact;
         }
     }
@@ -6518,9 +6518,9 @@ int64_t float128_to_int64_round_to_zero(float128 a, float_status *status)
             }
             return 0;
         }
-        z = aSig0>>( - shiftCount );
+        z = aSig0 >> ( - shiftCount );
         if (    aSig1
-             || ( shiftCount && (uint64_t) ( aSig0<<( shiftCount & 63 ) ) ) ) {
+             || ( shiftCount && (uint64_t) ( aSig0 << ( shiftCount & 63 ) ) ) ) {
             status->float_exception_flags |= float_flag_inexact;
         }
     }
@@ -6776,19 +6776,19 @@ float128 float128_round_to_int(float128 a, float_status *status)
         return a;
     }
     lastBitMask = 1;
-    lastBitMask = ( lastBitMask<<( 0x406E - aExp ) )<<1;
+    lastBitMask = ( lastBitMask << ( 0x406E - aExp ) ) << 1;
     roundBitsMask = lastBitMask - 1;
     z = a;
     switch (status->float_rounding_mode) {
     case float_round_nearest_even:
         if ( lastBitMask ) {
-            add128( z.high, z.low, 0, lastBitMask>>1, &z.high, &z.low );
+            add128( z.high, z.low, 0, lastBitMask >> 1, &z.high, &z.low );
             if ( ( z.low & roundBitsMask ) == 0 ) z.low &= ~ lastBitMask;
         }
         else {
             if ( (int64_t) z.low < 0 ) {
                 ++z.high;
-                if ( (uint64_t) ( z.low<<1 ) == 0 ) z.high &= ~1;
+                if ( (uint64_t) ( z.low << 1 ) == 0 ) z.high &= ~1;
             }
         }
         break;
@@ -6829,7 +6829,7 @@ float128 float128_round_to_int(float128 a, float_status *status)
     }
     else {
         if ( aExp < 0x3FFF ) {
-            if ( ( ( (uint64_t) ( a.high<<1 ) ) | a.low ) == 0 ) return a;
+            if ( ( ( (uint64_t) ( a.high << 1 ) ) | a.low ) == 0 ) return a;
             status->float_exception_flags |= float_flag_inexact;
             aSign = extractFloat128Sign( a );
             switch (status->float_rounding_mode) {
@@ -6870,13 +6870,13 @@ float128 float128_round_to_int(float128 a, float_status *status)
         z.high = a.high;
         switch (status->float_rounding_mode) {
         case float_round_nearest_even:
-            z.high += lastBitMask>>1;
+            z.high += lastBitMask >> 1;
             if ( ( ( z.high & roundBitsMask ) | a.low ) == 0 ) {
                 z.high &= ~ lastBitMask;
             }
             break;
         case float_round_ties_away:
-            z.high += lastBitMask>>1;
+            z.high += lastBitMask >> 1;
             break;
         case float_round_to_zero:
             break;
@@ -7422,18 +7422,18 @@ float128 float128_sqrt(float128 a, float_status *status)
         if ( ( aSig0 | aSig1 ) == 0 ) return packFloat128( 0, 0, 0, 0 );
         normalizeFloat128Subnormal( aSig0, aSig1, &aExp, &aSig0, &aSig1 );
     }
-    zExp = ( ( aExp - 0x3FFF )>>1 ) + 0x3FFE;
+    zExp = ( ( aExp - 0x3FFF ) >> 1 ) + 0x3FFE;
     aSig0 |= UINT64_C(0x0001000000000000);
-    zSig0 = estimateSqrt32( aExp, aSig0>>17 );
+    zSig0 = estimateSqrt32( aExp, aSig0 >> 17 );
     shortShift128Left( aSig0, aSig1, 13 - ( aExp & 1 ), &aSig0, &aSig1 );
-    zSig0 = estimateDiv128To64( aSig0, aSig1, zSig0<<32 ) + ( zSig0<<30 );
-    doubleZSig0 = zSig0<<1;
+    zSig0 = estimateDiv128To64( aSig0, aSig1, zSig0 << 32 ) + ( zSig0 << 30 );
+    doubleZSig0 = zSig0 << 1;
     mul64To128( zSig0, zSig0, &term0, &term1 );
     sub128( aSig0, aSig1, term0, term1, &rem0, &rem1 );
     while ( (int64_t) rem0 < 0 ) {
         --zSig0;
         doubleZSig0 -= 2;
-        add128( rem0, rem1, zSig0>>63, doubleZSig0 | 1, &rem0, &rem1 );
+        add128( rem0, rem1, zSig0 >> 63, doubleZSig0 | 1, &rem0, &rem1 );
     }
     zSig1 = estimateDiv128To64( rem1, 0, doubleZSig0 );
     if ( ( zSig1 & 0x1FFF ) <= 5 ) {
@@ -7467,9 +7467,9 @@ floatx80_compare_internal(floatx80 a, floatx80 b, bool is_quiet,
         return float_relation_unordered;
     }
     if (( ( extractFloatx80Exp( a ) == 0x7fff ) &&
-          ( extractFloatx80Frac( a )<<1 ) ) ||
+          ( extractFloatx80Frac( a ) << 1 ) ) ||
         ( ( extractFloatx80Exp( b ) == 0x7fff ) &&
-          ( extractFloatx80Frac( b )<<1 ) )) {
+          ( extractFloatx80Frac( b ) << 1 ) )) {
         if (!is_quiet ||
             floatx80_is_signaling_nan(a, status) ||
             floatx80_is_signaling_nan(b, status)) {
@@ -7535,7 +7535,7 @@ float128_compare_internal(float128 a, float128 b, bool is_quiet,
     aSign = extractFloat128Sign( a );
     bSign = extractFloat128Sign( b );
     if ( aSign != bSign ) {
-        if ( ( ( ( a.high | b.high )<<1 ) | a.low | b.low ) == 0 ) {
+        if ( ( ( ( a.high | b.high ) << 1 ) | a.low | b.low ) == 0 ) {
             /* zero case */
             return float_relation_equal;
         } else {
@@ -7576,7 +7576,7 @@ floatx80 floatx80_scalbn(floatx80 a, int n, float_status *status)
     aSign = extractFloatx80Sign( a );
 
     if ( aExp == 0x7FFF ) {
-        if ( aSig<<1 ) {
+        if ( aSig << 1 ) {
             return propagateFloatx80NaN(a, a, status);
         }
         return a;
-- 
2.29.0-rc1
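
As a quick illustration of the rule the patch enforces, here is a minimal,
hypothetical example (the function names are made up and do not appear in
softfloat.c); QEMU's scripts/checkpatch.pl typically warns when a binary
operator such as ">>" is written without surrounding spaces:

    #include <stdint.h>

    /* Old style: no spaces around the binary ">>" operator. */
    static inline uint64_t round_shift_old(uint64_t absZ, uint64_t roundIncrement)
    {
        return ( absZ + roundIncrement )>>7;
    }

    /* Style after the patch: a space on both sides of ">>". */
    static inline uint64_t round_shift_new(uint64_t absZ, uint64_t roundIncrement)
    {
        return ( absZ + roundIncrement ) >> 7;
    }

Both variants compile to the same code; the change is purely cosmetic.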