Skip to content

Commit a4574f6

Browse files
djbw authored and torvalds committed
mm/memremap_pages: convert to 'struct range'
The 'struct resource' in 'struct dev_pagemap' is only used for holding resource span information. The other fields, 'name', 'flags', 'desc', 'parent', 'sibling', and 'child' are all unused wasted space. This is in preparation for introducing a multi-range extension of devm_memremap_pages(). The bulk of this change is unwinding all the places internal to libnvdimm that used 'struct resource' unnecessarily, and replacing instances of 'struct dev_pagemap'.res with 'struct dev_pagemap'.range. P2PDMA had a minor usage of the resource flags field, but only to report failures with "%pR". That is replaced with an open coded print of the range. [dan.carpenter@oracle.com: mm/hmm/test: use after free in dmirror_allocate_chunk()] Link: https://lkml.kernel.org/r/20200926121402.GA7467@kadam Signed-off-by: Dan Williams <dan.j.williams@intel.com> Signed-off-by: Dan Carpenter <dan.carpenter@oracle.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Reviewed-by: Boris Ostrovsky <boris.ostrovsky@oracle.com> [xen] Cc: Paul Mackerras <paulus@ozlabs.org> Cc: Michael Ellerman <mpe@ellerman.id.au> Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org> Cc: Vishal Verma <vishal.l.verma@intel.com> Cc: Vivek Goyal <vgoyal@redhat.com> Cc: Dave Jiang <dave.jiang@intel.com> Cc: Ben Skeggs <bskeggs@redhat.com> Cc: David Airlie <airlied@linux.ie> Cc: Daniel Vetter <daniel@ffwll.ch> Cc: Ira Weiny <ira.weiny@intel.com> Cc: Bjorn Helgaas <bhelgaas@google.com> Cc: Juergen Gross <jgross@suse.com> Cc: Stefano Stabellini <sstabellini@kernel.org> Cc: "Jérôme Glisse" <jglisse@redhat.com> Cc: Andy Lutomirski <luto@kernel.org> Cc: Ard Biesheuvel <ard.biesheuvel@linaro.org> Cc: Ard Biesheuvel <ardb@kernel.org> Cc: Borislav Petkov <bp@alien8.de> Cc: Brice Goglin <Brice.Goglin@inria.fr> Cc: Catalin Marinas <catalin.marinas@arm.com> Cc: Dave Hansen <dave.hansen@linux.intel.com> Cc: David Hildenbrand <david@redhat.com> Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org> Cc: "H. 
Peter Anvin" <hpa@zytor.com> Cc: Hulk Robot <hulkci@huawei.com> Cc: Ingo Molnar <mingo@redhat.com> Cc: Jason Gunthorpe <jgg@mellanox.com> Cc: Jason Yan <yanaijie@huawei.com> Cc: Jeff Moyer <jmoyer@redhat.com> Cc: Jia He <justin.he@arm.com> Cc: Joao Martins <joao.m.martins@oracle.com> Cc: Jonathan Cameron <Jonathan.Cameron@huawei.com> Cc: kernel test robot <lkp@intel.com> Cc: Mike Rapoport <rppt@linux.ibm.com> Cc: Pavel Tatashin <pasha.tatashin@soleen.com> Cc: Peter Zijlstra <peterz@infradead.org> Cc: "Rafael J. Wysocki" <rafael.j.wysocki@intel.com> Cc: Randy Dunlap <rdunlap@infradead.org> Cc: Thomas Gleixner <tglx@linutronix.de> Cc: Tom Lendacky <thomas.lendacky@amd.com> Cc: Wei Yang <richard.weiyang@linux.alibaba.com> Cc: Will Deacon <will@kernel.org> Link: https://lkml.kernel.org/r/159643103173.4062302.768998885691711532.stgit@dwillia2-desk3.amr.corp.intel.com Link: https://lkml.kernel.org/r/160106115761.30709.13539840236873663620.stgit@dwillia2-desk3.amr.corp.intel.com Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
1 parent fcffb6a commit a4574f6

File tree

21 files changed

+195
-165
lines changed

21 files changed

+195
-165
lines changed

arch/powerpc/kvm/book3s_hv_uvmem.c

+7-6
Original file line numberDiff line numberDiff line change
@@ -687,9 +687,9 @@ static struct page *kvmppc_uvmem_get_page(unsigned long gpa, struct kvm *kvm)
687687
struct kvmppc_uvmem_page_pvt *pvt;
688688
unsigned long pfn_last, pfn_first;
689689

690-
pfn_first = kvmppc_uvmem_pgmap.res.start >> PAGE_SHIFT;
690+
pfn_first = kvmppc_uvmem_pgmap.range.start >> PAGE_SHIFT;
691691
pfn_last = pfn_first +
692-
(resource_size(&kvmppc_uvmem_pgmap.res) >> PAGE_SHIFT);
692+
(range_len(&kvmppc_uvmem_pgmap.range) >> PAGE_SHIFT);
693693

694694
spin_lock(&kvmppc_uvmem_bitmap_lock);
695695
bit = find_first_zero_bit(kvmppc_uvmem_bitmap,
@@ -1007,7 +1007,7 @@ static vm_fault_t kvmppc_uvmem_migrate_to_ram(struct vm_fault *vmf)
10071007
static void kvmppc_uvmem_page_free(struct page *page)
10081008
{
10091009
unsigned long pfn = page_to_pfn(page) -
1010-
(kvmppc_uvmem_pgmap.res.start >> PAGE_SHIFT);
1010+
(kvmppc_uvmem_pgmap.range.start >> PAGE_SHIFT);
10111011
struct kvmppc_uvmem_page_pvt *pvt;
10121012

10131013
spin_lock(&kvmppc_uvmem_bitmap_lock);
@@ -1170,7 +1170,8 @@ int kvmppc_uvmem_init(void)
11701170
}
11711171

11721172
kvmppc_uvmem_pgmap.type = MEMORY_DEVICE_PRIVATE;
1173-
kvmppc_uvmem_pgmap.res = *res;
1173+
kvmppc_uvmem_pgmap.range.start = res->start;
1174+
kvmppc_uvmem_pgmap.range.end = res->end;
11741175
kvmppc_uvmem_pgmap.ops = &kvmppc_uvmem_ops;
11751176
/* just one global instance: */
11761177
kvmppc_uvmem_pgmap.owner = &kvmppc_uvmem_pgmap;
@@ -1205,7 +1206,7 @@ void kvmppc_uvmem_free(void)
12051206
return;
12061207

12071208
memunmap_pages(&kvmppc_uvmem_pgmap);
1208-
release_mem_region(kvmppc_uvmem_pgmap.res.start,
1209-
resource_size(&kvmppc_uvmem_pgmap.res));
1209+
release_mem_region(kvmppc_uvmem_pgmap.range.start,
1210+
range_len(&kvmppc_uvmem_pgmap.range));
12101211
kfree(kvmppc_uvmem_bitmap);
12111212
}

drivers/dax/bus.c

+5-5
Original file line numberDiff line numberDiff line change
@@ -515,7 +515,7 @@ static void dax_region_unregister(void *region)
515515
}
516516

517517
struct dax_region *alloc_dax_region(struct device *parent, int region_id,
518-
struct resource *res, int target_node, unsigned int align,
518+
struct range *range, int target_node, unsigned int align,
519519
unsigned long flags)
520520
{
521521
struct dax_region *dax_region;
@@ -530,8 +530,8 @@ struct dax_region *alloc_dax_region(struct device *parent, int region_id,
530530
return NULL;
531531
}
532532

533-
if (!IS_ALIGNED(res->start, align)
534-
|| !IS_ALIGNED(resource_size(res), align))
533+
if (!IS_ALIGNED(range->start, align)
534+
|| !IS_ALIGNED(range_len(range), align))
535535
return NULL;
536536

537537
dax_region = kzalloc(sizeof(*dax_region), GFP_KERNEL);
@@ -546,8 +546,8 @@ struct dax_region *alloc_dax_region(struct device *parent, int region_id,
546546
dax_region->target_node = target_node;
547547
ida_init(&dax_region->ida);
548548
dax_region->res = (struct resource) {
549-
.start = res->start,
550-
.end = res->end,
549+
.start = range->start,
550+
.end = range->end,
551551
.flags = IORESOURCE_MEM | flags,
552552
};
553553

drivers/dax/bus.h

+1-1
Original file line numberDiff line numberDiff line change
@@ -13,7 +13,7 @@ void dax_region_put(struct dax_region *dax_region);
1313

1414
#define IORESOURCE_DAX_STATIC (1UL << 0)
1515
struct dax_region *alloc_dax_region(struct device *parent, int region_id,
16-
struct resource *res, int target_node, unsigned int align,
16+
struct range *range, int target_node, unsigned int align,
1717
unsigned long flags);
1818

1919
enum dev_dax_subsys {

drivers/dax/dax-private.h

-5
Original file line numberDiff line numberDiff line change
@@ -61,11 +61,6 @@ struct dev_dax {
6161
struct range range;
6262
};
6363

64-
static inline u64 range_len(struct range *range)
65-
{
66-
return range->end - range->start + 1;
67-
}
68-
6964
static inline struct dev_dax *to_dev_dax(struct device *dev)
7065
{
7166
return container_of(dev, struct dev_dax, dev);

drivers/dax/device.c

+1-2
Original file line numberDiff line numberDiff line change
@@ -416,8 +416,7 @@ int dev_dax_probe(struct dev_dax *dev_dax)
416416
pgmap = devm_kzalloc(dev, sizeof(*pgmap), GFP_KERNEL);
417417
if (!pgmap)
418418
return -ENOMEM;
419-
pgmap->res.start = range->start;
420-
pgmap->res.end = range->end;
419+
pgmap->range = *range;
421420
}
422421
pgmap->type = MEMORY_DEVICE_GENERIC;
423422
addr = devm_memremap_pages(dev, pgmap);

drivers/dax/hmem/hmem.c

+4-1
Original file line numberDiff line numberDiff line change
@@ -13,13 +13,16 @@ static int dax_hmem_probe(struct platform_device *pdev)
1313
struct dev_dax_data data;
1414
struct dev_dax *dev_dax;
1515
struct resource *res;
16+
struct range range;
1617

1718
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1819
if (!res)
1920
return -ENOMEM;
2021

2122
mri = dev->platform_data;
22-
dax_region = alloc_dax_region(dev, pdev->id, res, mri->target_node,
23+
range.start = res->start;
24+
range.end = res->end;
25+
dax_region = alloc_dax_region(dev, pdev->id, &range, mri->target_node,
2326
PMD_SIZE, 0);
2427
if (!dax_region)
2528
return -ENOMEM;

drivers/dax/pmem/core.c

+6-6
Original file line numberDiff line numberDiff line change
@@ -9,7 +9,7 @@
99

1010
struct dev_dax *__dax_pmem_probe(struct device *dev, enum dev_dax_subsys subsys)
1111
{
12-
struct resource res;
12+
struct range range;
1313
int rc, id, region_id;
1414
resource_size_t offset;
1515
struct nd_pfn_sb *pfn_sb;
@@ -50,10 +50,10 @@ struct dev_dax *__dax_pmem_probe(struct device *dev, enum dev_dax_subsys subsys)
5050
if (rc != 2)
5151
return ERR_PTR(-EINVAL);
5252

53-
/* adjust the dax_region resource to the start of data */
54-
memcpy(&res, &pgmap.res, sizeof(res));
55-
res.start += offset;
56-
dax_region = alloc_dax_region(dev, region_id, &res,
53+
/* adjust the dax_region range to the start of data */
54+
range = pgmap.range;
55+
range.start += offset,
56+
dax_region = alloc_dax_region(dev, region_id, &range,
5757
nd_region->target_node, le32_to_cpu(pfn_sb->align),
5858
IORESOURCE_DAX_STATIC);
5959
if (!dax_region)
@@ -64,7 +64,7 @@ struct dev_dax *__dax_pmem_probe(struct device *dev, enum dev_dax_subsys subsys)
6464
.id = id,
6565
.pgmap = &pgmap,
6666
.subsys = subsys,
67-
.size = resource_size(&res),
67+
.size = range_len(&range),
6868
};
6969
dev_dax = devm_create_dev_dax(&data);
7070

drivers/gpu/drm/nouveau/nouveau_dmem.c

+7-7
Original file line numberDiff line numberDiff line change
@@ -101,7 +101,7 @@ unsigned long nouveau_dmem_page_addr(struct page *page)
101101
{
102102
struct nouveau_dmem_chunk *chunk = nouveau_page_to_chunk(page);
103103
unsigned long off = (page_to_pfn(page) << PAGE_SHIFT) -
104-
chunk->pagemap.res.start;
104+
chunk->pagemap.range.start;
105105

106106
return chunk->bo->offset + off;
107107
}
@@ -249,7 +249,8 @@ nouveau_dmem_chunk_alloc(struct nouveau_drm *drm, struct page **ppage)
249249

250250
chunk->drm = drm;
251251
chunk->pagemap.type = MEMORY_DEVICE_PRIVATE;
252-
chunk->pagemap.res = *res;
252+
chunk->pagemap.range.start = res->start;
253+
chunk->pagemap.range.end = res->end;
253254
chunk->pagemap.ops = &nouveau_dmem_pagemap_ops;
254255
chunk->pagemap.owner = drm->dev;
255256

@@ -273,7 +274,7 @@ nouveau_dmem_chunk_alloc(struct nouveau_drm *drm, struct page **ppage)
273274
list_add(&chunk->list, &drm->dmem->chunks);
274275
mutex_unlock(&drm->dmem->mutex);
275276

276-
pfn_first = chunk->pagemap.res.start >> PAGE_SHIFT;
277+
pfn_first = chunk->pagemap.range.start >> PAGE_SHIFT;
277278
page = pfn_to_page(pfn_first);
278279
spin_lock(&drm->dmem->lock);
279280
for (i = 0; i < DMEM_CHUNK_NPAGES - 1; ++i, ++page) {
@@ -294,8 +295,7 @@ nouveau_dmem_chunk_alloc(struct nouveau_drm *drm, struct page **ppage)
294295
out_bo_free:
295296
nouveau_bo_ref(NULL, &chunk->bo);
296297
out_release:
297-
release_mem_region(chunk->pagemap.res.start,
298-
resource_size(&chunk->pagemap.res));
298+
release_mem_region(chunk->pagemap.range.start, range_len(&chunk->pagemap.range));
299299
out_free:
300300
kfree(chunk);
301301
out:
@@ -382,8 +382,8 @@ nouveau_dmem_fini(struct nouveau_drm *drm)
382382
nouveau_bo_ref(NULL, &chunk->bo);
383383
list_del(&chunk->list);
384384
memunmap_pages(&chunk->pagemap);
385-
release_mem_region(chunk->pagemap.res.start,
386-
resource_size(&chunk->pagemap.res));
385+
release_mem_region(chunk->pagemap.range.start,
386+
range_len(&chunk->pagemap.range));
387387
kfree(chunk);
388388
}
389389

drivers/nvdimm/badrange.c

+13-13
Original file line numberDiff line numberDiff line change
@@ -211,7 +211,7 @@ static void __add_badblock_range(struct badblocks *bb, u64 ns_offset, u64 len)
211211
}
212212

213213
static void badblocks_populate(struct badrange *badrange,
214-
struct badblocks *bb, const struct resource *res)
214+
struct badblocks *bb, const struct range *range)
215215
{
216216
struct badrange_entry *bre;
217217

@@ -222,34 +222,34 @@ static void badblocks_populate(struct badrange *badrange,
222222
u64 bre_end = bre->start + bre->length - 1;
223223

224224
/* Discard intervals with no intersection */
225-
if (bre_end < res->start)
225+
if (bre_end < range->start)
226226
continue;
227-
if (bre->start > res->end)
227+
if (bre->start > range->end)
228228
continue;
229229
/* Deal with any overlap after start of the namespace */
230-
if (bre->start >= res->start) {
230+
if (bre->start >= range->start) {
231231
u64 start = bre->start;
232232
u64 len;
233233

234-
if (bre_end <= res->end)
234+
if (bre_end <= range->end)
235235
len = bre->length;
236236
else
237-
len = res->start + resource_size(res)
237+
len = range->start + range_len(range)
238238
- bre->start;
239-
__add_badblock_range(bb, start - res->start, len);
239+
__add_badblock_range(bb, start - range->start, len);
240240
continue;
241241
}
242242
/*
243243
* Deal with overlap for badrange starting before
244244
* the namespace.
245245
*/
246-
if (bre->start < res->start) {
246+
if (bre->start < range->start) {
247247
u64 len;
248248

249-
if (bre_end < res->end)
250-
len = bre->start + bre->length - res->start;
249+
if (bre_end < range->end)
250+
len = bre->start + bre->length - range->start;
251251
else
252-
len = resource_size(res);
252+
len = range_len(range);
253253
__add_badblock_range(bb, 0, len);
254254
}
255255
}
@@ -267,7 +267,7 @@ static void badblocks_populate(struct badrange *badrange,
267267
* and add badblocks entries for all matching sub-ranges
268268
*/
269269
void nvdimm_badblocks_populate(struct nd_region *nd_region,
270-
struct badblocks *bb, const struct resource *res)
270+
struct badblocks *bb, const struct range *range)
271271
{
272272
struct nvdimm_bus *nvdimm_bus;
273273

@@ -279,7 +279,7 @@ void nvdimm_badblocks_populate(struct nd_region *nd_region,
279279
nvdimm_bus = walk_to_nvdimm_bus(&nd_region->dev);
280280

281281
nvdimm_bus_lock(&nvdimm_bus->dev);
282-
badblocks_populate(&nvdimm_bus->badrange, bb, res);
282+
badblocks_populate(&nvdimm_bus->badrange, bb, range);
283283
nvdimm_bus_unlock(&nvdimm_bus->dev);
284284
}
285285
EXPORT_SYMBOL_GPL(nvdimm_badblocks_populate);

drivers/nvdimm/claim.c

+8-5
Original file line numberDiff line numberDiff line change
@@ -303,23 +303,26 @@ static int nsio_rw_bytes(struct nd_namespace_common *ndns,
303303
int devm_nsio_enable(struct device *dev, struct nd_namespace_io *nsio,
304304
resource_size_t size)
305305
{
306-
struct resource *res = &nsio->res;
307306
struct nd_namespace_common *ndns = &nsio->common;
307+
struct range range = {
308+
.start = nsio->res.start,
309+
.end = nsio->res.end,
310+
};
308311

309312
nsio->size = size;
310-
if (!devm_request_mem_region(dev, res->start, size,
313+
if (!devm_request_mem_region(dev, range.start, size,
311314
dev_name(&ndns->dev))) {
312-
dev_warn(dev, "could not reserve region %pR\n", res);
315+
dev_warn(dev, "could not reserve region %pR\n", &nsio->res);
313316
return -EBUSY;
314317
}
315318

316319
ndns->rw_bytes = nsio_rw_bytes;
317320
if (devm_init_badblocks(dev, &nsio->bb))
318321
return -ENOMEM;
319322
nvdimm_badblocks_populate(to_nd_region(ndns->dev.parent), &nsio->bb,
320-
&nsio->res);
323+
&range);
321324

322-
nsio->addr = devm_memremap(dev, res->start, size, ARCH_MEMREMAP_PMEM);
325+
nsio->addr = devm_memremap(dev, range.start, size, ARCH_MEMREMAP_PMEM);
323326

324327
return PTR_ERR_OR_ZERO(nsio->addr);
325328
}

drivers/nvdimm/nd.h

+2-1
Original file line numberDiff line numberDiff line change
@@ -377,8 +377,9 @@ int nvdimm_namespace_detach_btt(struct nd_btt *nd_btt);
377377
const char *nvdimm_namespace_disk_name(struct nd_namespace_common *ndns,
378378
char *name);
379379
unsigned int pmem_sector_size(struct nd_namespace_common *ndns);
380+
struct range;
380381
void nvdimm_badblocks_populate(struct nd_region *nd_region,
381-
struct badblocks *bb, const struct resource *res);
382+
struct badblocks *bb, const struct range *range);
382383
int devm_namespace_enable(struct device *dev, struct nd_namespace_common *ndns,
383384
resource_size_t size);
384385
void devm_namespace_disable(struct device *dev,

drivers/nvdimm/pfn_devs.c

+6-6
Original file line numberDiff line numberDiff line change
@@ -672,7 +672,7 @@ static unsigned long init_altmap_reserve(resource_size_t base)
672672

673673
static int __nvdimm_setup_pfn(struct nd_pfn *nd_pfn, struct dev_pagemap *pgmap)
674674
{
675-
struct resource *res = &pgmap->res;
675+
struct range *range = &pgmap->range;
676676
struct vmem_altmap *altmap = &pgmap->altmap;
677677
struct nd_pfn_sb *pfn_sb = nd_pfn->pfn_sb;
678678
u64 offset = le64_to_cpu(pfn_sb->dataoff);
@@ -689,16 +689,16 @@ static int __nvdimm_setup_pfn(struct nd_pfn *nd_pfn, struct dev_pagemap *pgmap)
689689
.end_pfn = PHYS_PFN(end),
690690
};
691691

692-
memcpy(res, &nsio->res, sizeof(*res));
693-
res->start += start_pad;
694-
res->end -= end_trunc;
695-
692+
*range = (struct range) {
693+
.start = nsio->res.start + start_pad,
694+
.end = nsio->res.end - end_trunc,
695+
};
696696
if (nd_pfn->mode == PFN_MODE_RAM) {
697697
if (offset < reserve)
698698
return -EINVAL;
699699
nd_pfn->npfns = le64_to_cpu(pfn_sb->npfns);
700700
} else if (nd_pfn->mode == PFN_MODE_PMEM) {
701-
nd_pfn->npfns = PHYS_PFN((resource_size(res) - offset));
701+
nd_pfn->npfns = PHYS_PFN((range_len(range) - offset));
702702
if (le64_to_cpu(nd_pfn->pfn_sb->npfns) > nd_pfn->npfns)
703703
dev_info(&nd_pfn->dev,
704704
"number of pfns truncated from %lld to %ld\n",

0 commit comments

Comments
 (0)