From: David Hildenbrand <david@redhat.com>
To: qemu-devel@nongnu.org
Date: Fri, 6 Sep 2019 09:57:37 +0200
Message-Id: <20190906075750.14791-16-david@redhat.com>
In-Reply-To: <20190906075750.14791-1-david@redhat.com>
References: <20190906075750.14791-1-david@redhat.com>
Subject: [Qemu-devel] [PATCH v2 15/28] s390x/tcg: Fault-safe memset
Cc: Florian Weimer, Thomas Huth, David Hildenbrand, Cornelia Huck,
    Stefano Brivio, qemu-s390x@nongnu.org, Richard Henderson

Replace fast_memset() with access_memset(), which first tries to probe
access to all affected pages (the maximum is two). We'll use the same
mechanism for other types of accesses soon.

Only in very rare cases (especially TLB_NOTDIRTY) do we have to fall back
to the ld/st helpers.

Signed-off-by: David Hildenbrand
Reviewed-by: Richard Henderson
---
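Reviewer aid, not part of the commit message: the snippet below is a
stand-alone sketch of the control flow this patch introduces. It uses plain
C and made-up stand-ins (PAGE_SIZE, probe(), slow_stb(), sketch_memset())
instead of the QEMU internals (TARGET_PAGE_SIZE, probe_access(),
helper_ret_stb_mmu(), access_memset()), so it only approximates the real
code in the diff below; in particular it ignores wrap_address(),
CONFIG_USER_ONLY and the MMU index handling.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define PAGE_SIZE 4096u

typedef struct Access {
    uint8_t *haddr1, *haddr2;   /* direct host pointers, NULL -> slow path */
    uint64_t vaddr1, vaddr2;    /* guest addresses for the slow path */
    uint16_t size1, size2;      /* size1 + size2 == total size (<= 4096) */
} Access;

/* Stand-in for probe_access(); returning NULL here would model a page that
 * is only reachable via the slow path (e.g. TLB_NOTDIRTY). */
static uint8_t *probe(uint8_t *mem, uint64_t vaddr)
{
    return mem + vaddr;
}

/* Stand-in for the byte-wise store helper (helper_ret_stb_mmu()). */
static void slow_stb(uint8_t *mem, uint64_t vaddr, uint8_t byte)
{
    mem[vaddr] = byte;
}

/* Probe the (at most two) pages touched by a size-byte access at vaddr. */
static Access prepare(uint8_t *mem, uint64_t vaddr, uint16_t size)
{
    Access a = { .vaddr1 = vaddr, .size1 = size };
    uint16_t in_page = PAGE_SIZE - (vaddr & (PAGE_SIZE - 1));

    if (size > in_page) {
        /* The access crosses a page boundary: split it into two parts. */
        a.size1 = in_page;
        a.vaddr2 = vaddr + in_page;
        a.size2 = size - in_page;
        a.haddr2 = probe(mem, a.vaddr2);
    }
    a.haddr1 = probe(mem, a.vaddr1);
    return a;
}

/* memset() over a prepared access: direct where possible, else byte-wise. */
static void sketch_memset(uint8_t *mem, uint64_t vaddr, uint8_t byte,
                          uint16_t size)
{
    Access a = prepare(mem, vaddr, size);

    if (a.haddr1) {
        memset(a.haddr1, byte, a.size1);
    } else {
        for (uint16_t i = 0; i < a.size1; i++) {
            slow_stb(mem, a.vaddr1 + i, byte);
        }
    }
    if (a.haddr2) {
        memset(a.haddr2, byte, a.size2);
    } else {
        for (uint16_t i = 0; i < a.size2; i++) {
            slow_stb(mem, a.vaddr2 + i, byte);
        }
    }
}

int main(void)
{
    static uint8_t mem[3 * PAGE_SIZE];

    /* 100 bytes starting 40 bytes before a page boundary -> 40 + 60 split. */
    sketch_memset(mem, PAGE_SIZE - 40, 0xab, 100);
    printf("%02x %02x\n", mem[PAGE_SIZE - 40], mem[PAGE_SIZE + 59]); /* ab ab */
    return 0;
}

The point is simply that a request of up to 4096 bytes is split into at most
two per-page parts up front, and the byte-wise slow path is only taken for a
part whose host address could not be obtained.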
 target/s390x/mem_helper.c | 99 ++++++++++++++++++++++++++++++---------
 1 file changed, 77 insertions(+), 22 deletions(-)

diff --git a/target/s390x/mem_helper.c b/target/s390x/mem_helper.c
index 8dd58b3ab1..8d654b24e7 100644
--- a/target/s390x/mem_helper.c
+++ b/target/s390x/mem_helper.c
@@ -117,27 +117,82 @@ static inline void cpu_stsize_data_ra(CPUS390XState *env, uint64_t addr,
     }
 }
 
-static void fast_memset(CPUS390XState *env, uint64_t dest, uint8_t byte,
-                        uint32_t l, uintptr_t ra)
-{
-    int mmu_idx = cpu_mmu_index(env, false);
+/*
+ * An access covers at most 4096 bytes and therefore at most two pages. If
+ * we can't access the host page directly, we'll have to do I/O access
+ * via ld/st helpers.
+ */
+typedef struct S390Access {
+    target_ulong vaddr1;
+    target_ulong vaddr2;
+    char *haddr1;
+    char *haddr2;
+    uint16_t size1;
+    uint16_t size2;
+} S390Access;
+
+static S390Access access_prepare_idx(CPUS390XState *env, vaddr vaddr, int size,
+                                     MMUAccessType access_type, int mmu_idx,
+                                     uintptr_t ra)
+{
+    S390Access access = {
+        .vaddr1 = vaddr,
+        .size1 = MIN(size, -(vaddr | TARGET_PAGE_MASK)),
+    };
+
+    g_assert(size > 0 && size <= 4096);
+    access.haddr1 = probe_access(env, access.vaddr1, access.size1, access_type,
+                                 mmu_idx, ra);
+
+    if (unlikely(access.size1 != size)) {
+        /* The access crosses page boundaries. */
+        access.vaddr2 = wrap_address(env, vaddr + access.size1);
+        access.size2 = size - access.size1;
+        access.haddr2 = probe_access(env, access.vaddr2, access.size2,
+                                     access_type, mmu_idx, ra);
+    }
+    return access;
+}
+
+static void access_memset_idx(CPUS390XState *env, vaddr vaddr, uint8_t byte,
+                              int size, int mmu_idx, uintptr_t ra)
+{
+    S390Access desta = access_prepare_idx(env, vaddr, size, MMU_DATA_STORE,
+                                          mmu_idx, ra);
+#ifdef CONFIG_USER_ONLY
+    g_assert(desta.haddr1 && (desta.haddr2 || !desta.size2));
+    memset(desta.haddr1, byte, desta.size1);
+    memset(desta.haddr2, byte, desta.size2);
+#else
+    TCGMemOpIdx oi = make_memop_idx(MO_UB, mmu_idx);
+    int i;
 
-    while (l > 0) {
-        void *p = tlb_vaddr_to_host(env, dest, MMU_DATA_STORE, mmu_idx);
-        if (p) {
-            /* Access to the whole page in write mode granted.  */
-            uint32_t l_adj = adj_len_to_page(l, dest);
-            memset(p, byte, l_adj);
-            dest += l_adj;
-            l -= l_adj;
-        } else {
-            /* We failed to get access to the whole page. The next write
-               access will likely fill the QEMU TLB for the next iteration.  */
-            cpu_stb_data_ra(env, dest, byte, ra);
-            dest++;
-            l--;
+    if (likely(desta.haddr1)) {
+        memset(desta.haddr1, byte, desta.size1);
+    } else {
+        for (i = 0; i < desta.size1; i++) {
+            helper_ret_stb_mmu(env, desta.vaddr1 + i, byte, oi, ra);
+        }
+    }
+    if (likely(!desta.size2)) {
+        return;
+    }
+    if (likely(desta.haddr2)) {
+        memset(desta.haddr2, byte, desta.size2);
+    } else {
+        for (i = 0; i < desta.size2; i++) {
+            helper_ret_stb_mmu(env, desta.vaddr2 + i, byte, oi, ra);
         }
     }
+#endif
+}
+
+static void access_memset(CPUS390XState *env, vaddr vaddr, uint8_t byte,
+                          int size, uintptr_t ra)
+{
+    int mmu_idx = cpu_mmu_index(env, false);
+
+    access_memset_idx(env, vaddr, byte, size, mmu_idx, ra);
 }
 
 #ifndef CONFIG_USER_ONLY
@@ -267,7 +322,7 @@ static uint32_t do_helper_xc(CPUS390XState *env, uint32_t l, uint64_t dest,
 
     /* xor with itself is the same as memset(0) */
     if (src == dest) {
-        fast_memset(env, dest, 0, l + 1, ra);
+        access_memset(env, dest, 0, l + 1, ra);
         return 0;
     }
 
@@ -329,7 +384,7 @@ static uint32_t do_helper_mvc(CPUS390XState *env, uint32_t l, uint64_t dest,
      * behave like memmove().
      */
     if (dest == src + 1) {
-        fast_memset(env, dest, cpu_ldub_data_ra(env, src, ra), l, ra);
+        access_memset(env, dest, cpu_ldub_data_ra(env, src, ra), l, ra);
     } else if (!is_destructive_overlap(env, dest, src, l)) {
         fast_memmove(env, dest, src, l, ra);
     } else {
@@ -798,7 +853,7 @@ static inline uint32_t do_mvcl(CPUS390XState *env,
     } else if (wordsize == 1) {
         /* Pad the remaining area */
         *destlen -= len;
-        fast_memset(env, *dest, pad, len, ra);
+        access_memset(env, *dest, pad, len, ra);
         *dest = wrap_address(env, *dest + len);
     } else {
         /* The remaining length selects the padding byte. */
@@ -852,7 +907,7 @@ uint32_t HELPER(mvcl)(CPUS390XState *env, uint32_t r1, uint32_t r2)
     while (destlen) {
         cur_len = MIN(destlen, 2048);
         if (!srclen) {
-            fast_memset(env, dest, pad, cur_len, ra);
+            access_memset(env, dest, pad, cur_len, ra);
         } else {
             cur_len = MIN(cur_len, srclen);
 
-- 
2.21.0