Contributors: 3

Author             Tokens  Token Proportion  Commits  Commit Proportion
Dan J Williams        459            98.92%        6             75.00%
Christoph Hellwig       3             0.65%        1             12.50%
linyujun                2             0.43%        1             12.50%
Total                 464                          8


// SPDX-License-Identifier: GPL-2.0
#include <linux/platform_device.h>
#include <linux/memregion.h>
#include <linux/module.h>
#include <linux/dax.h>
#include <linux/mm.h>

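/* allow opting out of hmem platform device registration */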
static bool nohmem;
module_param_named(disable, nohmem, bool, 0444);

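/* resource tree tracking address ranges already claimed for hmem devices */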
static struct resource hmem_active = {
	.name = "HMEM devices",
	.start = 0,
	.end = -1,
	.flags = IORESOURCE_MEM,
};

void hmem_register_device(int target_nid, struct resource *r)
{
	/* define a clean / non-busy resource for the platform device */
	struct resource res = {
		.start = r->start,
		.end = r->end,
		.flags = IORESOURCE_MEM,
		.desc = IORES_DESC_SOFT_RESERVED,
	};
	struct platform_device *pdev;
	struct memregion_info info;
	int rc, id;

	if (nohmem)
		return;

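	/* only proceed if the range intersects Soft Reserved memory and nothing else */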
	rc = region_intersects(res.start, resource_size(&res), IORESOURCE_MEM,
			IORES_DESC_SOFT_RESERVED);
	if (rc != REGION_INTERSECTS)
		return;

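	/* allocate a unique device id for the "hmem" platform device */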
	id = memregion_alloc(GFP_KERNEL);
	if (id < 0) {
		pr_err("memregion allocation failure for %pr\n", &res);
		return;
	}

	pdev = platform_device_alloc("hmem", id);
	if (!pdev) {
		pr_err("hmem device allocation failure for %pr\n", &res);
		goto out_pdev;
	}

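	/* claim the range in hmem_active so the same range is only registered once */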
	if (!__request_region(&hmem_active, res.start, resource_size(&res),
			      dev_name(&pdev->dev), 0)) {
		dev_dbg(&pdev->dev, "hmem range %pr already active\n", &res);
		goto out_active;
	}

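	/* bind the device to an online node and record the memory target node */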
	pdev->dev.numa_node = numa_map_to_online_node(target_nid);
	info = (struct memregion_info) {
		.target_node = target_nid,
	};
	rc = platform_device_add_data(pdev, &info, sizeof(info));
	if (rc < 0) {
		pr_err("hmem memregion_info allocation failure for %pr\n", &res);
		goto out_resource;
	}

	rc = platform_device_add_resources(pdev, &res, 1);
	if (rc < 0) {
		pr_err("hmem resource allocation failure for %pr\n", &res);
		goto out_resource;
	}

	rc = platform_device_add(pdev);
	if (rc < 0) {
		dev_err(&pdev->dev, "device add failed for %pr\n", &res);
		goto out_resource;
	}

	return;

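	/* error unwind, in reverse order of acquisition */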
out_resource:
	__release_region(&hmem_active, res.start, resource_size(&res));
out_active:
	platform_device_put(pdev);
out_pdev:
	memregion_free(id);
}

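/* per-range callback: resolve the target node and register an hmem device */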
static __init int hmem_register_one(struct resource *res, void *data)
{
	hmem_register_device(phys_to_target_node(res->start), res);

	return 0;
}

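/* register devices for all Soft Reserved ranges present at boot */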
static __init int hmem_init(void)
{
	walk_iomem_res_desc(IORES_DESC_SOFT_RESERVED,
			IORESOURCE_MEM, 0, -1, NULL, hmem_register_one);
	return 0;
}

/*
 * As this is a fallback for address ranges unclaimed by the ACPI HMAT
 * parsing, it must be at an initcall level greater than hmat_init().
 */
late_initcall(hmem_init);