[PATCH] docs: Add/Improve function documentation for NUMA code

Posted by Bernhard Kaindl 2 months, 3 weeks ago
---
 xen/arch/x86/srat.c      | 45 ++++++++++++++++++++++++++++++++++++++--
 xen/arch/x86/x86_64/mm.c | 22 ++++++++++++++++++--
 xen/common/numa.c        | 27 ++++++++++++++++++++++--
 3 files changed, 88 insertions(+), 6 deletions(-)

diff --git a/xen/arch/x86/srat.c b/xen/arch/x86/srat.c
index 688f410287..a7f6b79324 100644
--- a/xen/arch/x86/srat.c
+++ b/xen/arch/x86/srat.c
@@ -51,6 +51,22 @@ nodeid_t pxm_to_node(unsigned pxm)
 	return NUMA_NO_NODE;
 }
 
+/**
+ * @brief Set up a NUMA node for the given proximity domain (pxm).
+ *
+ * If the proximity domain is already present in the pxm2node array, this
+ * function returns the node ID already associated with it.
+ *
+ * Otherwise, it assigns a new node ID to the proximity domain and returns it.
+ * If the maximum number of nodes has been reached, it returns NUMA_NO_NODE.
+ *
+ * @param pxm The proximity domain to set up or to find in the pxm2node array.
+ * @return The node ID associated with the given proximity domain,
+ *         or NUMA_NO_NODE if the maximum number of nodes
+ *         has already been reached.
+ *
+ * @note Uses a static variable to track the next node ID to assign.
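+ *
+ * Illustrative use from an SRAT parsing callback (a sketch only; the
+ * actual callers in this file add their own error handling):
+ * @code
+ *     nodeid_t node = setup_node(pxm);
+ *
+ *     if (node == NUMA_NO_NODE)
+ *         return;  // no node IDs left for this proximity domain
+ * @endcode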
+ */
 nodeid_t setup_node(unsigned pxm)
 {
 	nodeid_t node;
@@ -212,7 +228,19 @@ acpi_numa_processor_affinity_init(const struct acpi_srat_cpu_affinity *pa)
 		       pxm, pa->apic_id, node);
 }
 
-/* Callback for parsing of the Proximity Domain <-> Memory Area mappings */
+/**
+ * @brief Initialize memory affinity from an SRAT memory affinity structure.
+ * @param ma Pointer to the ACPI SRAT memory affinity structure to be parsed.
+ *
+ * Callback for parsing the Proximity Domain <-> Memory Area mappings
+ * provided by the ACPI SRAT (System Resource Affinity Table).
+ *
+ * This function is called during the initialization phase and is responsible
+ * for setting up the memory affinity information of NUMA nodes based on the
+ * ACPI SRAT entries provided to it.
+ *
+ * If the entry is valid, it updates the memory block information of its node.
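+ *
+ * The key fields consumed from the entry are (illustrative, simplified):
+ * @code
+ *     paddr_t start = ma->base_address;
+ *     paddr_t end = start + ma->length;
+ *     unsigned int pxm = ma->proximity_domain;  // 8 bits if SRAT rev < 2
+ * @endcode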
+ */
 void __init
 acpi_numa_memory_affinity_init(const struct acpi_srat_mem_affinity *ma)
 {
@@ -314,16 +342,29 @@ void __init srat_parse_regions(paddr_t addr)
 	pfn_pdx_hole_setup(mask >> PAGE_SHIFT);
 }
 
+/**
+ * @brief Find the proximity domain of a given NUMA node ID.
+ *
+ * @param n The NUMA node ID to look up.
+ * @return The proximity domain of the given NUMA node ID, or 0 if not found.
+ *
+ * This function uses the pxm2node array to find the proximity domain:
+ * pxm2node maps proximity domains to NUMA nodes and is indexed by the
+ * proximity domain. Each entry holds a proximity domain and its NUMA node.
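+ *
+ * Shape of the mapping (a simplified sketch of the array in this file):
+ * @code
+ *     static struct { unsigned int pxm; nodeid_t node; } pxm2node[];
+ * @endcode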
+ */
 unsigned int numa_node_to_arch_nid(nodeid_t n)
 {
 	unsigned int i;
 
+	/* If indexing pxm2node by the node ID matches, return its pxm directly: */
 	if ((n < ARRAY_SIZE(pxm2node)) && (pxm2node[n].node == n))
 		return pxm2node[n].pxm;
+
+	/* Otherwise, scan the pxm2node array for an entry matching this node: */
 	for (i = 0; i < ARRAY_SIZE(pxm2node); i++)
 		if (pxm2node[i].node == n)
 			return pxm2node[i].pxm;
-	return 0;
+	return 0;  /* Node not found: fall back to proximity domain 0. */
 }
 
 u8 __node_distance(nodeid_t a, nodeid_t b)
diff --git a/xen/arch/x86/x86_64/mm.c b/xen/arch/x86/x86_64/mm.c
index 0d8e602529..e8571000b2 100644
--- a/xen/arch/x86/x86_64/mm.c
+++ b/xen/arch/x86/x86_64/mm.c
@@ -1218,8 +1218,26 @@ static int mem_hotadd_check(unsigned long spfn, unsigned long epfn)
 }
 
 /*
- * A bit paranoid for memory allocation failure issue since
- * it may be reason for memory add
+ * @brief Add a contiguous range of pages to the Xen hypervisor heap.
+ *
+ * Update the machine-to-phys (m2p) and frame tables, the max_page, max_pdx
+ * and total_pages values, and set up page table entries for the new range.
+ *
+ * Also update the node_start_pfn and node_spanned_pages fields in the
+ * node_data structure of the NUMA node that the memory range belongs to.
+ *
+ * If that NUMA node is offline, mark the node as online.
+ *
+ * If the hardware domain uses an IOMMU whose pagetable does not share the
+ * hardware domain's P2M table and is not kept in sync with it, also update
+ * the IOMMU mappings for the newly added memory.
+ *
+ * If a memory allocation or adding the memory fails, revert any changes made.
+ *
+ * @param spfn The starting page frame number of the range.
+ * @param epfn The ending page frame number of the range.
+ * @param pxm The proximity domain of the range, identifying its NUMA node.
+ * @return 0 on success, or a negative error code on failure.
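+ *
+ * Illustrative call from a memory hotplug handler (hypothetical sketch):
+ * @code
+ *     int rc = memory_add(spfn, epfn, pxm);
+ *
+ *     if ( rc )
+ *         printk("memory_add [%lx, %lx) failed: %d\n", spfn, epfn, rc);
+ * @endcode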
  */
 int memory_add(unsigned long spfn, unsigned long epfn, unsigned int pxm)
 {
diff --git a/xen/common/numa.c b/xen/common/numa.c
index ce3991929c..5db0a6745e 100644
--- a/xen/common/numa.c
+++ b/xen/common/numa.c
@@ -140,6 +140,18 @@ bool __init numa_memblks_available(void)
 }
 
 /*
+ * @brief Update the memory blocks of a NUMA node.
+ *
+ * Check for overlaps and interleaves with the node's existing memory blocks.
+ * If there are none, update the node's memory range and keep the blocks sorted.
+ *
+ * @param node The node ID whose memory blocks are being updated.
+ * @param arch_nid The architecture node ID associated with the memory.
+ * @param start The starting physical address of the memory block.
+ * @param size The size of the memory block.
+ * @param hotplug Indicates whether the memory is being hotplugged.
+ * @return true if the memory block was successfully added, false otherwise.
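+ *
+ * Illustrative call from an SRAT parsing callback (a sketch only):
+ * @code
+ *     if ( !numa_update_node_memblks(node, pxm, start, size, false) )
+ *         mark_fw_numa_info_bad();  // hypothetical error handler
+ * @endcode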
+ *
  * This function will be called by NUMA memory affinity initialization to
  * update NUMA node's memory range. In this function, we assume all memory
  * regions belonging to a single node are in one chunk. Holes (or MMIO
@@ -264,8 +276,17 @@ bool __init numa_update_node_memblks(nodeid_t node, unsigned int arch_nid,
 }
 
 /*
+ * @brief Check if all RAM ranges are covered by NUMA nodes.
+ *
  * Sanity check to catch more bad SRATs (they are amazingly common).
  * Make sure the PXMs cover all memory.
+ *
+ * This function iterates over the memory map and checks that each RAM range
+ * is covered by at least one NUMA node. For any RAM range that no node
+ * covers, it logs an error and returns false; once all ranges have been
+ * checked successfully, it returns true.
+ *
+ * @return true if all RAM ranges are covered by NUMA nodes, false otherwise.
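+ *
+ * Example with hypothetical values: a RAM range [0, 8G) with node0 spanning
+ * [0, 4G) and node1 spanning [4G, 8G) is clipped by both nodes until the
+ * remaining range is empty, so it counts as covered.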
  */
 static bool __init nodes_cover_memory(void)
 {
@@ -291,18 +312,20 @@ static bool __init nodes_cover_memory(void)
 
         do {
             found = false;
+            /* Loop over the NUMA nodes and check if they cover this mapping */
             for_each_node_mask ( j, memory_nodes_parsed )
+                /* Check if this node's memory range overlaps this mapping */
                 if ( start < numa_nodes[j].end && end > numa_nodes[j].start )
                 {
                     if ( start >= numa_nodes[j].start )
                     {
-                        start = numa_nodes[j].end;
+                        start = numa_nodes[j].end;  /* Re-check memory above */
                         found = true;
                     }
 
                     if ( end <= numa_nodes[j].end )
                     {
-                        end = numa_nodes[j].start;
+                        end = numa_nodes[j].start;  /* Re-check memory below */
                         found = true;
                     }
                 }
-- 
2.43.0