From: Ruslan Ruslichenko <Ruslan_Ruslichenko@epam.com>

Implement the core access handlers for the Remote Port Memory
Master device, which enables QEMU to initiate memory-mapped
transactions to remote peers.

The patch adds support for:
- Translation of QEMU MemoryTransactions into Remote Port
  read/write packets
- Propagation of transaction attributes (Secure, MasterID)
- Synchronous response waiting with an optional timeout
- Trace events for monitoring TX/RX bus transactions
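
For illustration only (not part of this patch): a minimal sketch of how
another device model might issue a 32-bit read through the exported
rp_mm_access() helper. The foo_read32() wrapper, its arguments and the
included headers are assumptions; only rp_mm_access(), MemoryTransaction
and the blocking/timeout semantics come from this series.

    #include "qemu/osdep.h"
    #include "hw/core/remote-port.h"   /* assumed to declare rp_mm_access() */

    /* Hypothetical helper: issue a 4-byte read over Remote Port. */
    static uint64_t foo_read32(RemotePort *rp, uint32_t rp_dev,
                               struct rp_peer_state *peer, hwaddr addr)
    {
        MemoryTransaction tr = {
            .rw = false,                    /* read */
            .addr = addr,
            .size = 4,
            .attr = MEMTXATTRS_UNSPECIFIED,
        };

        /* Blocks until the peer responds (or the optional timeout expires). */
        if (rp_mm_access(rp, rp_dev, peer, &tr, true, 0) != MEMTX_OK) {
            return 0;
        }
        return tr.data.u64;
    }
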
Signed-off-by: Edgar E. Iglesias <edgar.iglesias@amd.com>
Signed-off-by: Takahiro Nakata <takahiro.nakata.wr@renesas.com>
Signed-off-by: Ruslan Ruslichenko <Ruslan_Ruslichenko@epam.com>
---
hw/core/remote-port-memory-master.c | 167 +++++++++++++++++++++++++++-
hw/core/remote-port.c | 5 +
hw/core/trace-events | 4 +
include/hw/core/remote-port.h | 11 ++
4 files changed, 185 insertions(+), 2 deletions(-)
diff --git a/hw/core/remote-port-memory-master.c b/hw/core/remote-port-memory-master.c
index d714f66ddf..4a6c43423d 100644
--- a/hw/core/remote-port-memory-master.c
+++ b/hw/core/remote-port-memory-master.c
@@ -41,10 +41,173 @@
#define RP_MAX_ACCESS_SIZE 4096
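+/* Fetch the response timeout of the owning Memory Master (0 if none/unknown). */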
+static int rp_mm_get_timeout(MemoryTransaction *tr)
+{
+ RemotePortMap *map = tr->opaque;
+ RemotePortMemoryMaster *s;
+
+ if (!map || !map->parent ||
+ !object_dynamic_cast(OBJECT(map->parent),
+ TYPE_REMOTE_PORT_MEMORY_MASTER)) {
+ return 0;
+ }
+ s = REMOTE_PORT_MEMORY_MASTER(map->parent);
+ return s->rp_timeout;
+}
+
+MemTxResult rp_mm_access_with_def_attr(RemotePort *rp, uint32_t rp_dev,
+ struct rp_peer_state *peer,
+ MemoryTransaction *tr,
+ bool relative, uint64_t offset,
+ uint32_t def_attr)
+{
+ uint64_t addr = tr->addr;
+ RemotePortRespSlot *rsp_slot;
+ RemotePortDynPkt *rsp;
+ struct {
+ struct rp_pkt_busaccess_ext_base pkt;
+ uint8_t reserved[RP_MAX_ACCESS_SIZE];
+ } pay;
+ uint8_t *data = rp_busaccess_tx_dataptr(peer, &pay.pkt);
+ struct rp_encode_busaccess_in in = {0};
+ int i;
+ int len;
+ int rp_timeout = rp_mm_get_timeout(tr);
+ MemTxResult ret;
+
+    /* Refuse accesses larger than the payload buffer. */
+    if (tr->size > RP_MAX_ACCESS_SIZE) {
+        return MEMTX_ERROR;
+    }
+ DB_PRINT_L(0, "addr: %" HWADDR_PRIx " data: %" PRIx64 "\n",
+ addr, tr->data.u64);
+
+ if (tr->rw) {
+ /* Data up to 8 bytes is passed as values. */
+ if (tr->size <= 8) {
+ for (i = 0; i < tr->size; i++) {
+ data[i] = tr->data.u64 >> (i * 8);
+ }
+ } else {
+ memcpy(data, tr->data.p8, tr->size);
+ }
+ }
+
+ addr += relative ? 0 : offset;
+
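+    /* Build the bus-access request; for writes the payload follows the header. */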
+ in.cmd = tr->rw ? RP_CMD_write : RP_CMD_read;
+ in.id = rp_new_id(rp);
+ in.dev = rp_dev;
+ in.clk = rp_normalized_vmclk(rp);
+ in.master_id = tr->attr.requester_id;
+ in.addr = addr;
+ in.attr = def_attr;
+ in.attr |= tr->attr.secure ? RP_BUS_ATTR_SECURE : 0;
+ in.size = tr->size;
+ in.stream_width = tr->size;
+ len = rp_encode_busaccess(peer, &pay.pkt, &in);
+ len += tr->rw ? tr->size : 0;
+
+ trace_remote_port_memory_master_tx_busaccess(rp_cmd_to_string(in.cmd),
+ in.id, in.flags, in.dev, in.addr, in.size, in.attr);
+
+ rp_rsp_mutex_lock(rp);
+ rp_write(rp, (void *) &pay, len);
+
+ if (!rp_timeout) {
+ rsp_slot = rp_dev_wait_resp(rp, in.dev, in.id);
+ } else {
+ rsp_slot = rp_dev_timed_wait_resp(rp, in.dev, in.id, rp_timeout);
+        if (!rsp_slot->valid) {
+            /* Timed out waiting for the remote peer's response. */
+            rp_rsp_mutex_unlock(rp);
+            return MEMTX_ERROR;
+        }
+ }
+ rsp = &rsp_slot->rsp;
+
+    /* We don't support out-of-order answers yet. */
+ assert(rsp->pkt->hdr.id == in.id);
+
+ switch (rp_get_busaccess_response(rsp->pkt)) {
+ case RP_RESP_OK:
+ ret = MEMTX_OK;
+ break;
+ case RP_RESP_ADDR_ERROR:
+ ret = MEMTX_DECODE_ERROR;
+ break;
+ default:
+ ret = MEMTX_ERROR;
+ break;
+ }
+
+ if (ret == MEMTX_OK && !tr->rw) {
+ data = rp_busaccess_rx_dataptr(peer, &rsp->pkt->busaccess_ext_base);
+        /* Data up to 8 bytes is returned as values. */
+ if (tr->size <= 8) {
+ for (i = 0; i < tr->size; i++) {
+ tr->data.u64 |= ((uint64_t) data[i]) << (i * 8);
+ }
+ } else {
+ memcpy(tr->data.p8, data, tr->size);
+ }
+ }
+
+ trace_remote_port_memory_master_rx_busaccess(
+ rp_cmd_to_string(rsp->pkt->hdr.cmd), rsp->pkt->hdr.id,
+ rsp->pkt->hdr.flags, rsp->pkt->hdr.dev, rsp->pkt->busaccess.addr,
+ rsp->pkt->busaccess.len, rsp->pkt->busaccess.attributes);
+
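+    /* After a timeout, reap any response slots that have completed meanwhile. */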
+ if (rp_timeout) {
+ for (i = 0; i < ARRAY_SIZE(rp->dev_state[rp_dev].rsp_queue); i++) {
+ if (rp->dev_state[rp_dev].rsp_queue[i].used &&
+ rp->dev_state[rp_dev].rsp_queue[i].valid) {
+ rp_resp_slot_done(rp, &rp->dev_state[rp_dev].rsp_queue[i]);
+ }
+ }
+ } else {
+ rp_resp_slot_done(rp, rsp_slot);
+ }
+ rp_rsp_mutex_unlock(rp);
+
+    /*
+     * For strongly ordered transactions, or transactions that don't allow
+     * Early Acking, we need to drain the pending RP processing queue here.
+     * This is because RP handles responses in parallel with normal requests,
+     * so they may get reordered. This becomes visible for example with reads
+     * of read-to-clear registers that clear interrupts. Even though the
+     * lowering of the interrupt wires reaches us before the read response,
+     * we may handle the response before the wire update, resulting in
+     * spurious interrupts.
+     *
+     * This has some room for optimization but for now we use the big hammer
+     * and drain the entire queue.
+     */
+ rp_process(rp);
+
+ /* Reads are sync-points, roll the sync timer. */
+ rp_restart_sync_timer(rp);
+ DB_PRINT_L(1, "\n");
+ return ret;
+}
+
+MemTxResult rp_mm_access(RemotePort *rp, uint32_t rp_dev,
+ struct rp_peer_state *peer,
+ MemoryTransaction *tr,
+ bool relative, uint64_t offset)
+{
+ return rp_mm_access_with_def_attr(rp, rp_dev, peer, tr, relative, offset,
+ 0);
+}
+
static MemTxResult rp_access(MemoryTransaction *tr)
{
- /* TBD */
- return MEMTX_OK;
+ RemotePortMap *map = tr->opaque;
+ RemotePortMemoryMaster *s = map->parent;
+
+ return rp_mm_access(s->rp, s->rp_dev, s->peer, tr, s->relative,
+ map->offset);
}
static const MemoryRegionOps rp_ops_template = {
diff --git a/hw/core/remote-port.c b/hw/core/remote-port.c
index c783a12153..41ac1b46dc 100644
--- a/hw/core/remote-port.c
+++ b/hw/core/remote-port.c
@@ -62,6 +62,11 @@ static void rp_pkt_dump(const char *prefix, const char *buf, size_t len)
qemu_hexdump(stdout, prefix, buf, len);
}
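+/* Allocate a new request ID; atomic so concurrent initiators get unique IDs. */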
+uint32_t rp_new_id(RemotePort *s)
+{
+ return qatomic_fetch_inc(&s->current_id);
+}
+
void rp_rsp_mutex_lock(RemotePort *s)
{
qemu_mutex_lock(&s->rsp_mutex);
diff --git a/hw/core/trace-events b/hw/core/trace-events
index 2cf085ac66..6332002d65 100644
--- a/hw/core/trace-events
+++ b/hw/core/trace-events
@@ -32,3 +32,7 @@ clock_set_mul_div(const char *clk, uint32_t oldmul, uint32_t mul, uint32_t olddi
# cpu-common.c
cpu_reset(int cpu_index) "%d"
+
+# remote-port-memory-master.c
+remote_port_memory_master_tx_busaccess(const char *cmd, uint32_t id, uint32_t flags, uint32_t dev, uint64_t addr, uint32_t len, uint64_t attr) "cmd=%s, id=0x%"PRIx32", flags=0x%"PRIx32", dev=0x%"PRIx32", addr=0x%"PRIx64", len=0x%"PRIx32", attr=0x%"PRIx64
+remote_port_memory_master_rx_busaccess(const char *cmd, uint32_t id, uint32_t flags, uint32_t dev, uint64_t addr, uint32_t len, uint64_t attr) "cmd=%s, id=0x%"PRIx32", flags=0x%"PRIx32", dev=0x%"PRIx32", addr=0x%"PRIx64", len=0x%"PRIx32", attr=0x%"PRIx64
diff --git a/include/hw/core/remote-port.h b/include/hw/core/remote-port.h
index e5dfaf7c3b..172c4b6204 100644
--- a/include/hw/core/remote-port.h
+++ b/include/hw/core/remote-port.h
@@ -153,4 +153,15 @@ ssize_t rp_write(RemotePort *s, const void *buf, size_t count);
struct rp_peer_state *rp_get_peer(RemotePort *s);
+uint32_t rp_new_id(RemotePort *s);
+
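+/* Return a response slot to the pool once its packet has been consumed. */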
+static inline void rp_resp_slot_done(RemotePort *s,
+ RemotePortRespSlot *rsp_slot)
+{
+ rp_dpkt_invalidate(&rsp_slot->rsp);
+ rsp_slot->id = ~0;
+ rsp_slot->used = false;
+ rsp_slot->valid = false;
+}
+
#endif
--
2.43.0