From: Mark Cave-Ayland <mark.cave-ayland@ilande.co.uk>
To: qemu-devel@nongnu.org, pbonzini@redhat.com, fam@euphon.net, laurent@vivier.eu
Date: Wed, 30 Dec 2020 15:37:43 +0000
Message-Id: <20201230153745.30241-24-mark.cave-ayland@ilande.co.uk>
X-Mailer: git-send-email 2.20.1
In-Reply-To: <20201230153745.30241-1-mark.cave-ayland@ilande.co.uk>
References: <20201230153745.30241-1-mark.cave-ayland@ilande.co.uk>
Subject: [PATCH 23/25] esp: use FIFO for PDMA transfers between initiator and device

PDMA as implemented on the Quadra 800 uses DREQ to load data into the FIFO
up to a maximum of 16 bytes at a time. The MacOS toolbox ROM requires this
because it mixes FIFO and PDMA transfers whilst checking the FIFO status
and counter registers to ensure success.

Signed-off-by: Mark Cave-Ayland <mark.cave-ayland@ilande.co.uk>
---
 hw/scsi/esp.c | 104 +++++++++++++++++++++++++++++++++++---------------
 1 file changed, 73 insertions(+), 31 deletions(-)

diff --git a/hw/scsi/esp.c b/hw/scsi/esp.c
index 69c03e59f0..d2a70998aa 100644
--- a/hw/scsi/esp.c
+++ b/hw/scsi/esp.c
@@ -134,13 +134,8 @@ static void set_pdma(ESPState *s, enum pdma_origin_id origin)
 
 static uint8_t esp_pdma_read(ESPState *s)
 {
-    uint32_t dmalen = esp_get_tc(s);
     uint8_t val;
 
-    if (dmalen == 0) {
-        return 0;
-    }
-
     switch (s->pdma_origin) {
     case TI:
         if (s->do_cmd) {
@@ -161,8 +156,6 @@ static uint8_t esp_pdma_read(ESPState *s)
     }
 
     s->ti_size--;
-    dmalen--;
-    esp_set_tc(s, dmalen);
 
     return val;
 }
@@ -447,28 +440,71 @@ static void esp_dma_done(ESPState *s)
 static void do_dma_pdma_cb(ESPState *s)
 {
     int to_device = ((s->rregs[ESP_RSTAT] & 7) == STAT_DO);
+    int len;
 
     if (s->do_cmd) {
         s->ti_size = 0;
         s->cmdlen = 0;
         s->do_cmd = 0;
         do_cmd(s, s->cmdbuf);
+        esp_lower_drq(s);
         return;
     }
-    if (s->async_len == 0) {
-        scsi_req_continue(s->current_req);
-        /*
-         * If there is still data to be read from the device then
-         * complete the DMA operation immediately. Otherwise defer
-         * until the scsi layer has completed.
-         */
-        if (to_device || esp_get_tc(s) != 0 || s->ti_size == 0) {
+
+    if (to_device) {
+        /* Copy FIFO data to device */
+        len = MIN(s->ti_wptr, TI_BUFSZ);
+        memcpy(s->async_buf, s->ti_buf, len);
+        s->ti_wptr = 0;
+        s->ti_rptr = 0;
+        s->async_buf += len;
+        s->async_len -= len;
+        if (s->async_len == 0) {
+            scsi_req_continue(s->current_req);
+            /*
+             * If there is still data to be read from the device then
+             * complete the DMA operation immediately. Otherwise defer
+             * until the scsi layer has completed.
+             */
             return;
         }
-    }
 
-    /* Partially filled a scsi buffer. Complete immediately. */
-    esp_dma_done(s);
+        if (esp_get_tc(s) == 0) {
+            esp_lower_drq(s);
+            esp_dma_done(s);
+        }
+
+        return;
+    } else {
+        if (s->async_len == 0) {
+            scsi_req_continue(s->current_req);
+            /*
+             * If there is still data to be read from the device then
+             * complete the DMA operation immediately. Otherwise defer
+             * until the scsi layer has completed.
+             */
+            if (esp_get_tc(s) != 0) {
+                return;
+            }
+        }
+
+        if (esp_get_tc(s) != 0) {
+            /* Copy device data to FIFO */
+            s->ti_wptr = 0;
+            s->ti_rptr = 0;
+            len = MIN(s->async_len, TI_BUFSZ);
+            memcpy(s->ti_buf, s->async_buf, len);
+            s->ti_wptr += len;
+            s->async_buf += len;
+            s->async_len -= len;
+            esp_set_tc(s, esp_get_tc(s) - len);
+            return;
+        }
+
+        /* Partially filled a scsi buffer. Complete immediately. */
+        esp_lower_drq(s);
+        esp_dma_done(s);
+    }
 }
 
 static void esp_do_dma(ESPState *s)
@@ -511,7 +547,7 @@ static void esp_do_dma(ESPState *s)
         if (s->dma_memory_read) {
             s->dma_memory_read(s->dma_opaque, s->async_buf, len);
         } else {
-            set_pdma(s, ASYNC);
+            set_pdma(s, TI);
             s->pdma_cb = do_dma_pdma_cb;
             esp_raise_drq(s);
             return;
@@ -520,9 +556,19 @@ static void esp_do_dma(ESPState *s)
         if (s->dma_memory_write) {
             s->dma_memory_write(s->dma_opaque, s->async_buf, len);
         } else {
-            set_pdma(s, ASYNC);
+            /* Copy device data to FIFO */
+            len = MIN(len, TI_BUFSZ - s->ti_wptr);
+            memcpy(&s->ti_buf[s->ti_wptr], s->async_buf, len);
+            s->ti_wptr += len;
+            s->async_buf += len;
+            s->async_len -= len;
+            esp_set_tc(s, esp_get_tc(s) - len);
+            set_pdma(s, TI);
             s->pdma_cb = do_dma_pdma_cb;
             esp_raise_drq(s);
+
+            /* Indicate transfer to FIFO is complete */
+            s->rregs[ESP_RSTAT] |= STAT_TC;
             return;
         }
     }
@@ -548,6 +594,7 @@ static void esp_do_dma(ESPState *s)
 
     /* Partially filled a scsi buffer. Complete immediately. */
     esp_dma_done(s);
+    esp_lower_drq(s);
 }
 
 static void esp_report_command_complete(ESPState *s, uint32_t status)
@@ -564,6 +611,7 @@ static void esp_report_command_complete(ESPState *s, uint32_t status)
     s->status = status;
     s->rregs[ESP_RSTAT] = STAT_ST;
     esp_dma_done(s);
+    esp_lower_drq(s);
     if (s->current_req) {
         scsi_req_unref(s->current_req);
         s->current_req = NULL;
@@ -607,6 +655,7 @@ void esp_transfer_data(SCSIRequest *req, uint32_t len)
          * completion interrupt is deferred to here.
          */
         esp_dma_done(s);
+        esp_lower_drq(s);
     }
 }
 
@@ -944,10 +993,8 @@ static void sysbus_esp_pdma_write(void *opaque, hwaddr addr,
         break;
     }
     dmalen = esp_get_tc(s);
-    if (dmalen == 0 && s->pdma_cb) {
-        esp_lower_drq(s);
+    if (dmalen == 0 || (s->ti_wptr == TI_BUFSZ)) {
         s->pdma_cb(s);
-        s->pdma_cb = NULL;
     }
 }
 
@@ -956,14 +1003,10 @@ static uint64_t sysbus_esp_pdma_read(void *opaque, hwaddr addr,
 {
     SysBusESPState *sysbus = opaque;
    ESPState *s = &sysbus->esp;
-    uint32_t dmalen = esp_get_tc(s);
     uint64_t val = 0;
 
     trace_esp_pdma_read(size);
 
-    if (dmalen == 0) {
-        return 0;
-    }
     switch (size) {
     case 1:
         val = esp_pdma_read(s);
@@ -973,11 +1016,10 @@ static uint64_t sysbus_esp_pdma_read(void *opaque, hwaddr addr,
         val = (val << 8) | esp_pdma_read(s);
         break;
     }
-    dmalen = esp_get_tc(s);
-    if (dmalen == 0 && s->pdma_cb) {
-        esp_lower_drq(s);
+    if (s->ti_rptr == s->ti_wptr) {
+        s->ti_wptr = 0;
+        s->ti_rptr = 0;
         s->pdma_cb(s);
-        s->pdma_cb = NULL;
     }
     return val;
 }
-- 
2.20.1
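
For readers following the PDMA description in the commit message, below is
a rough standalone sketch (not QEMU code; the struct and function names are
invented for illustration) of the 16-byte FIFO gating this patch adds to the
PDMA write path: bytes written by the initiator are buffered until either
the transfer counter reaches zero or the FIFO fills, and only then is the
PDMA callback run to push the data towards the SCSI layer, which is the
ordering the MacOS toolbox ROM relies upon.

#include <stdint.h>

#define FIFO_SZ 16                     /* mirrors TI_BUFSZ on the ESP */

/* Simplified, hypothetical state: only the fields needed for the sketch */
typedef struct PdmaSketch {
    uint8_t  fifo[FIFO_SZ];            /* ti_buf in the real device */
    int      wptr;                     /* ti_wptr: FIFO write pointer */
    uint32_t tc;                       /* remaining transfer counter */
    void   (*pdma_cb)(struct PdmaSketch *s);  /* e.g. do_dma_pdma_cb */
} PdmaSketch;

/*
 * One PDMA write from the initiator: stage the byte in the FIFO and, as in
 * the patched sysbus_esp_pdma_write(), only run the callback once the
 * transfer counter reaches zero or the 16-byte FIFO is full.
 */
static void pdma_write_byte(PdmaSketch *s, uint8_t val)
{
    if (s->wptr < FIFO_SZ) {
        s->fifo[s->wptr++] = val;      /* stage the byte in the FIFO */
    }
    if (s->tc > 0) {
        s->tc--;                       /* one byte consumed from the TC */
    }
    if (s->tc == 0 || s->wptr == FIFO_SZ) {
        s->pdma_cb(s);                 /* drain FIFO towards the SCSI layer */
    }
}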