From: David Gibson <david@gibson.dropbear.id.au>
To: peter.maydell@linaro.org
Cc: lvivier@redhat.com, Richard Henderson, Mark Cave-Ayland,
 qemu-devel@nongnu.org, groug@kaod.org, spopovyc@redhat.com,
 qemu-ppc@nongnu.org, clg@kaod.org, David Gibson
Date: Mon, 4 Feb 2019 20:01:16 +1100
Message-Id: <20190204090124.26191-30-david@gibson.dropbear.id.au>
In-Reply-To: <20190204090124.26191-1-david@gibson.dropbear.id.au>
References: <20190204090124.26191-1-david@gibson.dropbear.id.au>
Subject: [Qemu-devel] [PULL 29/37] target/ppc: eliminate use of HI_IDX and
 LO_IDX macros from int_helper.c

From: Mark Cave-Ayland

The original purpose of these macros was to correctly reference the high
and low parts of the VSRs regardless of the host endianness. Replace these
direct references to high and low parts with the relevant VsrD macro
instead, and completely remove the now-unused HI_IDX and LO_IDX macros.
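As a quick illustration of the accessor pattern this patch moves to, here is
a minimal, self-contained sketch. The avr_t union and the macro bodies below
are illustrative assumptions for this note, not the exact definitions from
QEMU's target/ppc headers:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* 128-bit vector register seen as bytes or as two 64-bit doublewords. */
typedef union {
    uint8_t  u8[16];
    uint64_t u64[2];
} avr_t;

/*
 * Endian-aware member designators: index 0 always names the
 * architecturally high doubleword (and VsrB(0) the highest byte),
 * whichever array slot that is on the host.
 */
#if defined(HOST_WORDS_BIGENDIAN)
#define VsrB(i) u8[i]
#define VsrD(i) u64[i]
#else
#define VsrB(i) u8[15 - (i)]
#define VsrD(i) u64[1 - (i)]
#endif

int main(void)
{
    avr_t r;

    /* Old style: r.u64[HI_IDX] = 0; r.u64[LO_IDX] = 1; */
    r.VsrD(0) = 0;   /* high doubleword */
    r.VsrD(1) = 1;   /* low doubleword */

    printf("high=%" PRIu64 " low=%" PRIu64 " low byte=0x%x\n",
           r.VsrD(0), r.VsrD(1), r.VsrB(15));
    return 0;
}

Because the macros expand to a member designator, call sites read
r->VsrD(0) regardless of which u64[] slot the host's endianness puts the
high doubleword in, keeping the endianness arithmetic in one place instead
of repeating the HI_IDX/LO_IDX selection at every use.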
Signed-off-by: Mark Cave-Ayland
Reviewed-by: Richard Henderson
Signed-off-by: David Gibson
---
 target/ppc/int_helper.c | 180 +++++++++++++++++++---------------------
 1 file changed, 85 insertions(+), 95 deletions(-)

diff --git a/target/ppc/int_helper.c b/target/ppc/int_helper.c
index e531af5294..7a9c02d4bb 100644
--- a/target/ppc/int_helper.c
+++ b/target/ppc/int_helper.c
@@ -388,14 +388,6 @@ target_ulong helper_602_mfrom(target_ulong arg)
 
 /*****************************************************************************/
 /* Altivec extension helpers */
-#if defined(HOST_WORDS_BIGENDIAN)
-#define HI_IDX 0
-#define LO_IDX 1
-#else
-#define HI_IDX 1
-#define LO_IDX 0
-#endif
-
 #if defined(HOST_WORDS_BIGENDIAN)
 #define VECTOR_FOR_INORDER_I(index, element)            \
     for (index = 0; index < ARRAY_SIZE(r->element); index++)
@@ -514,8 +506,8 @@ void helper_vprtybq(ppc_avr_t *r, ppc_avr_t *b)
     res ^= res >> 32;
     res ^= res >> 16;
     res ^= res >> 8;
-    r->u64[LO_IDX] = res & 1;
-    r->u64[HI_IDX] = 0;
+    r->VsrD(1) = res & 1;
+    r->VsrD(0) = 0;
 }
 
 #define VARITH_DO(name, op, element)                                    \
@@ -1229,8 +1221,8 @@ void helper_vbpermq(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
         }
     }
 
-    r->u64[HI_IDX] = perm;
-    r->u64[LO_IDX] = 0;
+    r->VsrD(0) = perm;
+    r->VsrD(1) = 0;
 }
 
 #undef VBPERMQ_INDEX
@@ -1559,25 +1551,25 @@ void helper_vpmsumd(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
     ppc_avr_t prod[2];
 
     VECTOR_FOR_INORDER_I(i, u64) {
-        prod[i].u64[LO_IDX] = prod[i].u64[HI_IDX] = 0;
+        prod[i].VsrD(1) = prod[i].VsrD(0) = 0;
         for (j = 0; j < 64; j++) {
             if (a->u64[i] & (1ull << j)) {
                 ppc_avr_t bshift;
                 if (j == 0) {
-                    bshift.u64[HI_IDX] = 0;
-                    bshift.u64[LO_IDX] = b->u64[i];
+                    bshift.VsrD(0) = 0;
+                    bshift.VsrD(1) = b->u64[i];
                 } else {
-                    bshift.u64[HI_IDX] = b->u64[i] >> (64-j);
-                    bshift.u64[LO_IDX] = b->u64[i] << j;
+                    bshift.VsrD(0) = b->u64[i] >> (64 - j);
+                    bshift.VsrD(1) = b->u64[i] << j;
                 }
-                prod[i].u64[LO_IDX] ^= bshift.u64[LO_IDX];
-                prod[i].u64[HI_IDX] ^= bshift.u64[HI_IDX];
+                prod[i].VsrD(1) ^= bshift.VsrD(1);
+                prod[i].VsrD(0) ^= bshift.VsrD(0);
             }
         }
     }
 
-    r->u64[LO_IDX] = prod[0].u64[LO_IDX] ^ prod[1].u64[LO_IDX];
-    r->u64[HI_IDX] = prod[0].u64[HI_IDX] ^ prod[1].u64[HI_IDX];
+    r->VsrD(1) = prod[0].VsrD(1) ^ prod[1].VsrD(1);
+    r->VsrD(0) = prod[0].VsrD(0) ^ prod[1].VsrD(0);
 #endif
 }
 
@@ -1795,7 +1787,7 @@ VEXTU_X_DO(vextuwrx, 32, 0)
 #define VSHIFT(suffix, leftp)                                   \
 void helper_vs##suffix(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
 {                                                               \
-    int shift = b->u8[LO_IDX*15] & 0x7;                         \
+    int shift = b->VsrB(15) & 0x7;                              \
     int doit = 1;                                               \
     int i;                                                      \
                                                                 \
@@ -1806,15 +1798,15 @@ VEXTU_X_DO(vextuwrx, 32, 0)
         if (shift == 0) {                                       \
             *r = *a;                                            \
         } else if (leftp) {                                     \
-            uint64_t carry = a->u64[LO_IDX] >> (64 - shift);    \
+            uint64_t carry = a->VsrD(1) >> (64 - shift);        \
                                                                 \
-            r->u64[HI_IDX] = (a->u64[HI_IDX] << shift) | carry; \
-            r->u64[LO_IDX] = a->u64[LO_IDX] << shift;           \
+            r->VsrD(0) = (a->VsrD(0) << shift) | carry;         \
+            r->VsrD(1) = a->VsrD(1) << shift;                   \
         } else {                                                \
-            uint64_t carry = a->u64[HI_IDX] << (64 - shift);    \
+            uint64_t carry = a->VsrD(0) << (64 - shift);        \
                                                                 \
-            r->u64[LO_IDX] = (a->u64[LO_IDX] >> shift) | carry; \
-            r->u64[HI_IDX] = a->u64[HI_IDX] >> shift;           \
+            r->VsrD(1) = (a->VsrD(1) >> shift) | carry;         \
+            r->VsrD(0) = a->VsrD(0) >> shift;                   \
         }                                                       \
     }                                                           \
 }
@@ -1900,7 +1892,7 @@ void helper_vsldoi(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, uint32_t shift)
 
 void helper_vslo(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
 {
-    int sh = (b->u8[LO_IDX*0xf] >> 3) & 0xf;
+    int sh = (b->VsrB(0xf) >> 3) & 0xf;
 
 #if defined(HOST_WORDS_BIGENDIAN)
     memmove(&r->u8[0], &a->u8[sh], 16 - sh);
@@ -2096,7 +2088,7 @@ VSR(d, u64, 0x3F)
 
 void helper_vsro(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
 {
-    int sh = (b->u8[LO_IDX * 0xf] >> 3) & 0xf;
+    int sh = (b->VsrB(0xf) >> 3) & 0xf;
 
 #if defined(HOST_WORDS_BIGENDIAN)
     memmove(&r->u8[sh], &a->u8[0], 16 - sh);
@@ -2352,13 +2344,13 @@ static inline void avr_qw_not(ppc_avr_t *t, ppc_avr_t a)
 
 static int avr_qw_cmpu(ppc_avr_t a, ppc_avr_t b)
 {
-    if (a.u64[HI_IDX] < b.u64[HI_IDX]) {
+    if (a.VsrD(0) < b.VsrD(0)) {
         return -1;
-    } else if (a.u64[HI_IDX] > b.u64[HI_IDX]) {
+    } else if (a.VsrD(0) > b.VsrD(0)) {
         return 1;
-    } else if (a.u64[LO_IDX] < b.u64[LO_IDX]) {
+    } else if (a.VsrD(1) < b.VsrD(1)) {
         return -1;
-    } else if (a.u64[LO_IDX] > b.u64[LO_IDX]) {
+    } else if (a.VsrD(1) > b.VsrD(1)) {
         return 1;
     } else {
         return 0;
@@ -2367,17 +2359,17 @@ static int avr_qw_cmpu(ppc_avr_t a, ppc_avr_t b)
 
 static void avr_qw_add(ppc_avr_t *t, ppc_avr_t a, ppc_avr_t b)
 {
-    t->u64[LO_IDX] = a.u64[LO_IDX] + b.u64[LO_IDX];
-    t->u64[HI_IDX] = a.u64[HI_IDX] + b.u64[HI_IDX] +
-                     (~a.u64[LO_IDX] < b.u64[LO_IDX]);
+    t->VsrD(1) = a.VsrD(1) + b.VsrD(1);
+    t->VsrD(0) = a.VsrD(0) + b.VsrD(0) +
+                     (~a.VsrD(1) < b.VsrD(1));
 }
 
 static int avr_qw_addc(ppc_avr_t *t, ppc_avr_t a, ppc_avr_t b)
 {
     ppc_avr_t not_a;
-    t->u64[LO_IDX] = a.u64[LO_IDX] + b.u64[LO_IDX];
-    t->u64[HI_IDX] = a.u64[HI_IDX] + b.u64[HI_IDX] +
-                     (~a.u64[LO_IDX] < b.u64[LO_IDX]);
+    t->VsrD(1) = a.VsrD(1) + b.VsrD(1);
+    t->VsrD(0) = a.VsrD(0) + b.VsrD(0) +
+                     (~a.VsrD(1) < b.VsrD(1));
     avr_qw_not(&not_a, a);
     return avr_qw_cmpu(not_a, b) < 0;
 }
@@ -2399,11 +2391,11 @@ void helper_vaddeuqm(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
     r->u128 = a->u128 + b->u128 + (c->u128 & 1);
 #else
 
-    if (c->u64[LO_IDX] & 1) {
+    if (c->VsrD(1) & 1) {
         ppc_avr_t tmp;
 
-        tmp.u64[HI_IDX] = 0;
-        tmp.u64[LO_IDX] = c->u64[LO_IDX] & 1;
+        tmp.VsrD(0) = 0;
+        tmp.VsrD(1) = c->VsrD(1) & 1;
         avr_qw_add(&tmp, *a, tmp);
         avr_qw_add(r, tmp, *b);
     } else {
@@ -2421,8 +2413,8 @@ void helper_vaddcuq(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
 
     avr_qw_not(&not_a, *a);
 
-    r->u64[HI_IDX] = 0;
-    r->u64[LO_IDX] = (avr_qw_cmpu(not_a, *b) < 0);
+    r->VsrD(0) = 0;
+    r->VsrD(1) = (avr_qw_cmpu(not_a, *b) < 0);
 #endif
 }
 
@@ -2437,7 +2429,7 @@ void helper_vaddecuq(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
     r->u128 = carry_out;
 #else
 
-    int carry_in = c->u64[LO_IDX] & 1;
+    int carry_in = c->VsrD(1) & 1;
     int carry_out = 0;
     ppc_avr_t tmp;
 
@@ -2447,8 +2439,8 @@ void helper_vaddecuq(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
         ppc_avr_t one = QW_ONE;
         carry_out = avr_qw_addc(&tmp, tmp, one);
     }
-    r->u64[HI_IDX] = 0;
-    r->u64[LO_IDX] = carry_out;
+    r->VsrD(0) = 0;
+    r->VsrD(1) = carry_out;
 #endif
 }
 
@@ -2476,8 +2468,8 @@ void helper_vsubeuqm(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
     avr_qw_not(&tmp, *b);
     avr_qw_add(&sum, *a, tmp);
 
-    tmp.u64[HI_IDX] = 0;
-    tmp.u64[LO_IDX] = c->u64[LO_IDX] & 1;
+    tmp.VsrD(0) = 0;
+    tmp.VsrD(1) = c->VsrD(1) & 1;
     avr_qw_add(r, sum, tmp);
 #endif
 }
@@ -2493,10 +2485,10 @@ void helper_vsubcuq(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
         ppc_avr_t tmp;
         avr_qw_not(&tmp, *b);
         avr_qw_add(&tmp, *a, tmp);
-        carry = ((tmp.s64[HI_IDX] == -1ull) && (tmp.s64[LO_IDX] == -1ull));
+        carry = ((tmp.VsrSD(0) == -1ull) && (tmp.VsrSD(1) == -1ull));
     }
-    r->u64[HI_IDX] = 0;
-    r->u64[LO_IDX] = carry;
+    r->VsrD(0) = 0;
+    r->VsrD(1) = carry;
 #endif
 }
 
@@ -2507,17 +2499,17 @@ void helper_vsubecuq(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
                   (~a->u128 < ~b->u128) ||
                   ((c->u128 & 1) && (a->u128 + ~b->u128 == (__uint128_t)-1));
 #else
-    int carry_in = c->u64[LO_IDX] & 1;
+    int carry_in = c->VsrD(1) & 1;
     int carry_out = (avr_qw_cmpu(*a, *b) > 0);
     if (!carry_out && carry_in) {
         ppc_avr_t tmp;
         avr_qw_not(&tmp, *b);
         avr_qw_add(&tmp, *a, tmp);
-        carry_out = ((tmp.u64[HI_IDX] == -1ull) && (tmp.u64[LO_IDX] == -1ull));
+        carry_out = ((tmp.VsrD(0) == -1ull) && (tmp.VsrD(1) == -1ull));
     }
 
-    r->u64[HI_IDX] = 0;
-    r->u64[LO_IDX] = carry_out;
+    r->VsrD(0) = 0;
+    r->VsrD(1) = carry_out;
 #endif
 }
 
@@ -2615,7 +2607,7 @@ static bool bcd_is_valid(ppc_avr_t *bcd)
 
 static int bcd_cmp_zero(ppc_avr_t *bcd)
 {
-    if (bcd->u64[HI_IDX] == 0 && (bcd->u64[LO_IDX] >> 4) == 0) {
+    if (bcd->VsrD(0) == 0 && (bcd->VsrD(1) >> 4) == 0) {
         return CRF_EQ;
     } else {
         return (bcd_get_sgn(bcd) == 1) ? CRF_GT : CRF_LT;
@@ -2735,7 +2727,7 @@ uint32_t helper_bcdadd(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, uint32_t ps)
     }
 
     if (unlikely(invalid)) {
-        result.u64[HI_IDX] = result.u64[LO_IDX] = -1;
+        result.VsrD(0) = result.VsrD(1) = -1;
         cr = CRF_SO;
     } else if (overflow) {
         cr |= CRF_SO;
@@ -2804,7 +2796,7 @@ uint32_t helper_bcdctn(ppc_avr_t *r, ppc_avr_t *b, uint32_t ps)
     int invalid = (sgnb == 0);
     ppc_avr_t ret = { .u64 = { 0, 0 } };
 
-    int ox_flag = (b->u64[HI_IDX] != 0) || ((b->u64[LO_IDX] >> 32) != 0);
+    int ox_flag = (b->VsrD(0) != 0) || ((b->VsrD(1) >> 32) != 0);
 
     for (i = 1; i < 8; i++) {
         set_national_digit(&ret, 0x30 + bcd_get_digit(b, i, &invalid), i);
@@ -2884,7 +2876,7 @@ uint32_t helper_bcdctz(ppc_avr_t *r, ppc_avr_t *b, uint32_t ps)
     int invalid = (sgnb == 0);
     ppc_avr_t ret = { .u64 = { 0, 0 } };
 
-    int ox_flag = ((b->u64[HI_IDX] >> 4) != 0);
+    int ox_flag = ((b->VsrD(0) >> 4) != 0);
 
     for (i = 0; i < 16; i++) {
         digit = bcd_get_digit(b, i + 1, &invalid);
@@ -2925,13 +2917,13 @@ uint32_t helper_bcdcfsq(ppc_avr_t *r, ppc_avr_t *b, uint32_t ps)
     uint64_t hi_value;
     ppc_avr_t ret = { .u64 = { 0, 0 } };
 
-    if (b->s64[HI_IDX] < 0) {
-        lo_value = -b->s64[LO_IDX];
-        hi_value = ~b->u64[HI_IDX] + !lo_value;
+    if (b->VsrSD(0) < 0) {
+        lo_value = -b->VsrSD(1);
+        hi_value = ~b->VsrD(0) + !lo_value;
         bcd_put_digit(&ret, 0xD, 0);
     } else {
-        lo_value = b->u64[LO_IDX];
-        hi_value = b->u64[HI_IDX];
+        lo_value = b->VsrD(1);
+        hi_value = b->VsrD(0);
         bcd_put_digit(&ret, bcd_preferred_sgn(0, ps), 0);
     }
 
@@ -2979,11 +2971,11 @@ uint32_t helper_bcdctsq(ppc_avr_t *r, ppc_avr_t *b, uint32_t ps)
     }
 
     if (sgnb == -1) {
-        r->s64[LO_IDX] = -lo_value;
-        r->s64[HI_IDX] = ~hi_value + !r->s64[LO_IDX];
+        r->VsrSD(1) = -lo_value;
+        r->VsrSD(0) = ~hi_value + !r->VsrSD(1);
     } else {
-        r->s64[LO_IDX] = lo_value;
-        r->s64[HI_IDX] = hi_value;
+        r->VsrSD(1) = lo_value;
+        r->VsrSD(0) = hi_value;
     }
 
     cr = bcd_cmp_zero(b);
@@ -3043,7 +3035,7 @@ uint32_t helper_bcds(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, uint32_t ps)
     bool ox_flag = false;
    int sgnb = bcd_get_sgn(b);
     ppc_avr_t ret = *b;
-    ret.u64[LO_IDX] &= ~0xf;
+    ret.VsrD(1) &= ~0xf;
 
     if (bcd_is_valid(b) == false) {
         return CRF_SO;
@@ -3056,9 +3048,9 @@ uint32_t helper_bcds(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, uint32_t ps)
     }
 
     if (i > 0) {
-        ulshift(&ret.u64[LO_IDX], &ret.u64[HI_IDX], i * 4, &ox_flag);
+        ulshift(&ret.VsrD(1), &ret.VsrD(0), i * 4, &ox_flag);
     } else {
-        urshift(&ret.u64[LO_IDX], &ret.u64[HI_IDX], -i * 4);
+        urshift(&ret.VsrD(1), &ret.VsrD(0), -i * 4);
     }
     bcd_put_digit(&ret, bcd_preferred_sgn(sgnb, ps), 0);
 
@@ -3095,13 +3087,13 @@ uint32_t helper_bcdus(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, uint32_t ps)
 #endif
     if (i >= 32) {
         ox_flag = true;
-        ret.u64[LO_IDX] = ret.u64[HI_IDX] = 0;
+        ret.VsrD(1) = ret.VsrD(0) = 0;
     } else if (i <= -32) {
-        ret.u64[LO_IDX] = ret.u64[HI_IDX] = 0;
+        ret.VsrD(1) = ret.VsrD(0) = 0;
     } else if (i > 0) {
-        ulshift(&ret.u64[LO_IDX], &ret.u64[HI_IDX], i * 4, &ox_flag);
+        ulshift(&ret.VsrD(1), &ret.VsrD(0), i * 4, &ox_flag);
     } else {
-        urshift(&ret.u64[LO_IDX], &ret.u64[HI_IDX], -i * 4);
+        urshift(&ret.VsrD(1), &ret.VsrD(0), -i * 4);
     }
     *r = ret;
 
@@ -3121,7 +3113,7 @@ uint32_t helper_bcdsr(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, uint32_t ps)
     bool ox_flag = false;
     int sgnb = bcd_get_sgn(b);
     ppc_avr_t ret = *b;
-    ret.u64[LO_IDX] &= ~0xf;
+    ret.VsrD(1) &= ~0xf;
 
 #if defined(HOST_WORDS_BIGENDIAN)
     int i = a->s8[7];
@@ -3142,9 +3134,9 @@ uint32_t helper_bcdsr(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, uint32_t ps)
     }
 
     if (i > 0) {
-        ulshift(&ret.u64[LO_IDX], &ret.u64[HI_IDX], i * 4, &ox_flag);
+        ulshift(&ret.VsrD(1), &ret.VsrD(0), i * 4, &ox_flag);
     } else {
-        urshift(&ret.u64[LO_IDX], &ret.u64[HI_IDX], -i * 4);
+        urshift(&ret.VsrD(1), &ret.VsrD(0), -i * 4);
 
         if (bcd_get_digit(&ret, 0, &invalid) >= 5) {
             bcd_add_mag(&ret, &ret, &bcd_one, &invalid, &unused);
@@ -3178,19 +3170,19 @@ uint32_t helper_bcdtrunc(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, uint32_t ps)
 
     if (i > 16 && i < 32) {
         mask = (uint64_t)-1 >> (128 - i * 4);
-        if (ret.u64[HI_IDX] & ~mask) {
+        if (ret.VsrD(0) & ~mask) {
             ox_flag = CRF_SO;
         }
 
-        ret.u64[HI_IDX] &= mask;
+        ret.VsrD(0) &= mask;
     } else if (i >= 0 && i <= 16) {
         mask = (uint64_t)-1 >> (64 - i * 4);
-        if (ret.u64[HI_IDX] || (ret.u64[LO_IDX] & ~mask)) {
+        if (ret.VsrD(0) || (ret.VsrD(1) & ~mask)) {
             ox_flag = CRF_SO;
         }
 
-        ret.u64[LO_IDX] &= mask;
-        ret.u64[HI_IDX] = 0;
+        ret.VsrD(1) &= mask;
+        ret.VsrD(0) = 0;
     }
     bcd_put_digit(&ret, bcd_preferred_sgn(bcd_get_sgn(b), ps), 0);
     *r = ret;
@@ -3221,28 +3213,28 @@ uint32_t helper_bcdutrunc(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, uint32_t ps)
 #endif
     if (i > 16 && i < 33) {
         mask = (uint64_t)-1 >> (128 - i * 4);
-        if (ret.u64[HI_IDX] & ~mask) {
+        if (ret.VsrD(0) & ~mask) {
             ox_flag = CRF_SO;
         }
 
-        ret.u64[HI_IDX] &= mask;
+        ret.VsrD(0) &= mask;
     } else if (i > 0 && i <= 16) {
        mask = (uint64_t)-1 >> (64 - i * 4);
-        if (ret.u64[HI_IDX] || (ret.u64[LO_IDX] & ~mask)) {
+        if (ret.VsrD(0) || (ret.VsrD(1) & ~mask)) {
             ox_flag = CRF_SO;
         }
 
-        ret.u64[LO_IDX] &= mask;
-        ret.u64[HI_IDX] = 0;
+        ret.VsrD(1) &= mask;
+        ret.VsrD(0) = 0;
     } else if (i == 0) {
-        if (ret.u64[HI_IDX] || ret.u64[LO_IDX]) {
+        if (ret.VsrD(0) || ret.VsrD(1)) {
            ox_flag = CRF_SO;
        }
-        ret.u64[HI_IDX] = ret.u64[LO_IDX] = 0;
+        ret.VsrD(0) = ret.VsrD(1) = 0;
     }
 
     *r = ret;
-    if (r->u64[HI_IDX] == 0 && r->u64[LO_IDX] == 0) {
+    if (r->VsrD(0) == 0 && r->VsrD(1) == 0) {
         return ox_flag | CRF_EQ;
     }
 
@@ -3414,8 +3406,6 @@ void helper_vpermxor(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
 }
 
 #undef VECTOR_FOR_INORDER_I
-#undef HI_IDX
-#undef LO_IDX
 
 /*****************************************************************************/
 /* SPE extension helpers */
-- 
2.20.1