cregit-Linux how code gets into the kernel

Release 4.14 arch/powerpc/platforms/pseries/iommu.c

/*
 * Copyright (C) 2001 Mike Corrigan & Dave Engebretsen, IBM Corporation
 *
 * Rewrite, cleanup:
 *
 * Copyright (C) 2004 Olof Johansson <olof@lixom.net>, IBM Corporation
 * Copyright (C) 2006 Olof Johansson <olof@lixom.net>
 *
 * Dynamic DMA mapping support, pSeries-specific parts, both SMP and LPAR.
 *
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 */

#include <linux/init.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/memblock.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/crash_dump.h>
#include <linux/memory.h>
#include <linux/of.h>
#include <linux/iommu.h>
#include <linux/rculist.h>
#include <asm/io.h>
#include <asm/prom.h>
#include <asm/rtas.h>
#include <asm/iommu.h>
#include <asm/pci-bridge.h>
#include <asm/machdep.h>
#include <asm/firmware.h>
#include <asm/tce.h>
#include <asm/ppc-pci.h>
#include <asm/udbg.h>
#include <asm/mmzone.h>
#include <asm/plpar_wrappers.h>

#include "pseries.h"


static struct iommu_table_group *iommu_pseries_alloc_group(int node) { struct iommu_table_group *table_group = NULL; struct iommu_table *tbl = NULL; struct iommu_table_group_link *tgl = NULL; table_group = kzalloc_node(sizeof(struct iommu_table_group), GFP_KERNEL, node); if (!table_group) goto fail_exit; tbl = kzalloc_node(sizeof(struct iommu_table), GFP_KERNEL, node); if (!tbl) goto fail_exit; tgl = kzalloc_node(sizeof(struct iommu_table_group_link), GFP_KERNEL, node); if (!tgl) goto fail_exit; INIT_LIST_HEAD_RCU(&tbl->it_group_list); kref_init(&tbl->it_kref); tgl->table_group = table_group; list_add_rcu(&tgl->next, &tbl->it_group_list); table_group->tables[0] = tbl; return table_group; fail_exit: kfree(tgl); kfree(table_group); kfree(tbl); return NULL; }

Contributors

PersonTokensPropCommitsCommitProp
Alexey Kardashevskiy168100.00%4100.00%
Total168100.00%4100.00%


/*
 * Undo iommu_pseries_alloc_group(): unlink and free the group link,
 * release the iommu group (if one was registered), drop the table
 * reference and free the group structure itself.
 *
 * node_name is currently unused in this body.
 */
static void iommu_pseries_free_group(struct iommu_table_group *table_group,
		const char *node_name)
{
	struct iommu_table *tbl;
#ifdef CONFIG_IOMMU_API
	struct iommu_table_group_link *tgl;
#endif

	if (!table_group)
		return;

	tbl = table_group->tables[0];
#ifdef CONFIG_IOMMU_API
	/* The link list was populated at alloc time; warn once if empty. */
	tgl = list_first_entry_or_null(&tbl->it_group_list,
			struct iommu_table_group_link, next);

	WARN_ON_ONCE(!tgl);
	if (tgl) {
		list_del_rcu(&tgl->next);
		kfree(tgl);
	}
	if (table_group->group) {
		iommu_group_put(table_group->group);
		/* NOTE(review): assumes the put notifier clears ->group — confirm */
		BUG_ON(table_group->group);
	}
#endif
	/* Drops the reference taken by kref_init() in the alloc path. */
	iommu_tce_table_put(tbl);

	kfree(table_group);
}

Contributors

PersonTokensPropCommitsCommitProp
Alexey Kardashevskiy123100.00%4100.00%
Total123100.00%4100.00%


static int tce_build_pSeries(struct iommu_table *tbl, long index, long npages, unsigned long uaddr, enum dma_data_direction direction, unsigned long attrs) { u64 proto_tce; __be64 *tcep, *tces; u64 rpn; proto_tce = TCE_PCI_READ; // Read allowed if (direction != DMA_TO_DEVICE) proto_tce |= TCE_PCI_WRITE; tces = tcep = ((__be64 *)tbl->it_base) + index; while (npages--) { /* can't move this out since we might cross MEMBLOCK boundary */ rpn = __pa(uaddr) >> TCE_SHIFT; *tcep = cpu_to_be64(proto_tce | (rpn & TCE_RPN_MASK) << TCE_RPN_SHIFT); uaddr += TCE_PAGE_SIZE; tcep++; } return 0; }

Contributors

PersonTokensPropCommitsCommitProp
Benjamin Herrenschmidt6656.90%216.67%
Olof Johansson2723.28%216.67%
Milton D. Miller II54.31%18.33%
Anton Blanchard54.31%18.33%
Robert Jennings43.45%18.33%
Andrew Morton32.59%18.33%
Krzysztof Kozlowski21.72%18.33%
Mark Nelson21.72%18.33%
Yinghai Lu10.86%18.33%
Michael Ellerman10.86%18.33%
Total116100.00%12100.00%


/*
 * tce_free_pSeries - clear npages consecutive TCE entries starting at
 * index, by zeroing them directly in the kernel-mapped table.
 *
 * Fix vs. original: dropped the local `tces`, which was assigned but
 * never read.
 */
static void tce_free_pSeries(struct iommu_table *tbl, long index, long npages)
{
	__be64 *tcep;

	tcep = ((__be64 *)tbl->it_base) + index;

	while (npages--)
		*(tcep++) = 0;
}

Contributors

PersonTokensPropCommitsCommitProp
Benjamin Herrenschmidt3871.70%125.00%
Olof Johansson815.09%125.00%
Milton D. Miller II59.43%125.00%
Anton Blanchard23.77%125.00%
Total53100.00%4100.00%


/*
 * tce_get_pseries - read back one TCE entry from a kernel-mapped table,
 * converting it from big-endian table format to CPU byte order.
 */
static unsigned long tce_get_pseries(struct iommu_table *tbl, long index)
{
	__be64 *entry = ((__be64 *)tbl->it_base) + index;

	return be64_to_cpu(*entry);
}

Contributors

PersonTokensPropCommitsCommitProp
Haren Myneni3587.50%150.00%
Anton Blanchard512.50%150.00%
Total40100.00%2100.00%

/* Forward declarations: the build paths below call these to roll back
 * partially-installed mappings on H_NOT_ENOUGH_RESOURCES. */
static void tce_free_pSeriesLP(struct iommu_table*, long, long);
static void tce_freemulti_pSeriesLP(struct iommu_table*, long, long);
/*
 * tce_build_pSeriesLP - install TCEs one at a time via the H_PUT_TCE
 * hypervisor call (LPAR case: the table is owned by firmware).
 *
 * On H_NOT_ENOUGH_RESOURCES the entries installed so far are rolled
 * back with tce_free_pSeriesLP() and the hcall status is returned.
 * Other hcall failures are rate-limit logged but the loop continues.
 */
static int tce_build_pSeriesLP(struct iommu_table *tbl, long tcenum,
			       long npages, unsigned long uaddr,
			       enum dma_data_direction direction,
			       unsigned long attrs)
{
	u64 rc = 0;
	u64 proto_tce, tce;
	u64 rpn;
	int ret = 0;
	/* Remember the starting point so a partial build can be undone. */
	long tcenum_start = tcenum, npages_start = npages;

	rpn = __pa(uaddr) >> TCE_SHIFT;
	proto_tce = TCE_PCI_READ;
	if (direction != DMA_TO_DEVICE)
		proto_tce |= TCE_PCI_WRITE;

	while (npages--) {
		tce = proto_tce | (rpn & TCE_RPN_MASK) << TCE_RPN_SHIFT;

		/* << 12: TCE index to IO bus address (presumably 4K TCE
		 * pages, matching IOMMU_PAGE_SHIFT_4K — TODO confirm) */
		rc = plpar_tce_put((u64)tbl->it_index, (u64)tcenum << 12, tce);

		if (unlikely(rc == H_NOT_ENOUGH_RESOURCES)) {
			ret = (int)rc;
			/* Undo everything mapped before this failure. */
			tce_free_pSeriesLP(tbl, tcenum_start,
					   (npages_start - (npages + 1)));
			break;
		}

		if (rc && printk_ratelimit()) {
			printk("tce_build_pSeriesLP: plpar_tce_put failed. rc=%lld\n", rc);
			printk("\tindex   = 0x%llx\n", (u64)tbl->it_index);
			printk("\ttcenum  = 0x%llx\n", (u64)tcenum);
			printk("\ttce val = 0x%llx\n", tce );
			dump_stack();
		}

		tcenum++;
		rpn++;
	}
	return ret;
}

Contributors

PersonTokensPropCommitsCommitProp
Benjamin Herrenschmidt12056.60%111.11%
Robert Jennings5626.42%111.11%
Olof Johansson2612.26%222.22%
Ingo Molnar41.89%111.11%
Mark Nelson20.94%111.11%
Krzysztof Kozlowski20.94%111.11%
Anton Blanchard10.47%111.11%
Michael Ellerman10.47%111.11%
Total212100.00%9100.00%

/* Per-CPU scratch page used to stage batches of TCEs for
 * plpar_tce_put_indirect(); allocated lazily with interrupts off. */
static DEFINE_PER_CPU(__be64 *, tce_page);
/*
 * tce_buildmulti_pSeriesLP - install TCEs in page-sized batches via
 * plpar_tce_put_indirect(), staging them in the per-CPU tce_page.
 *
 * Falls back to the one-at-a-time tce_build_pSeriesLP() for a single
 * page, when the MULTITCE firmware feature is absent, or when the
 * scratch page cannot be allocated.  On H_NOT_ENOUGH_RESOURCES the
 * already-installed range is rolled back and the status returned.
 */
static int tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum,
				    long npages, unsigned long uaddr,
				    enum dma_data_direction direction,
				    unsigned long attrs)
{
	u64 rc = 0;
	u64 proto_tce;
	__be64 *tcep;
	u64 rpn;
	long l, limit;
	long tcenum_start = tcenum, npages_start = npages;
	int ret = 0;
	unsigned long flags;

	if ((npages == 1) || !firmware_has_feature(FW_FEATURE_MULTITCE)) {
		return tce_build_pSeriesLP(tbl, tcenum, npages, uaddr,
					   direction, attrs);
	}

	local_irq_save(flags);	/* to protect tcep and the page behind it */

	tcep = __this_cpu_read(tce_page);

	/* This is safe to do since interrupts are off when we're called
	 * from iommu_alloc{,_sg}()
	 */
	if (!tcep) {
		tcep = (__be64 *)__get_free_page(GFP_ATOMIC);
		/* If allocation fails, fall back to the loop implementation */
		if (!tcep) {
			local_irq_restore(flags);
			return tce_build_pSeriesLP(tbl, tcenum, npages, uaddr,
					    direction, attrs);
		}
		__this_cpu_write(tce_page, tcep);
	}

	rpn = __pa(uaddr) >> TCE_SHIFT;
	proto_tce = TCE_PCI_READ;
	if (direction != DMA_TO_DEVICE)
		proto_tce |= TCE_PCI_WRITE;

	/* We can map max one pageful of TCEs at a time */
	do {
		/*
		 * Set up the page with TCE data, looping through and setting
		 * the values.
		 */
		limit = min_t(long, npages, 4096/TCE_ENTRY_SIZE);

		for (l = 0; l < limit; l++) {
			tcep[l] = cpu_to_be64(proto_tce | (rpn & TCE_RPN_MASK) << TCE_RPN_SHIFT);
			rpn++;
		}

		rc = plpar_tce_put_indirect((u64)tbl->it_index,
					    (u64)tcenum << 12,
					    (u64)__pa(tcep),
					    limit);

		npages -= limit;
		tcenum += limit;
	} while (npages > 0 && !rc);

	local_irq_restore(flags);

	if (unlikely(rc == H_NOT_ENOUGH_RESOURCES)) {
		ret = (int)rc;
		/* Roll back everything mapped before the failed batch. */
		tce_freemulti_pSeriesLP(tbl, tcenum_start,
					(npages_start - (npages + limit)));
		return ret;
	}

	if (rc && printk_ratelimit()) {
		printk("tce_buildmulti_pSeriesLP: plpar_tce_put failed. rc=%lld\n", rc);
		printk("\tindex   = 0x%llx\n", (u64)tbl->it_index);
		printk("\tnpages  = 0x%llx\n", (u64)npages);
		printk("\ttce[0] val = 0x%llx\n", tcep[0]);
		dump_stack();
	}
	return ret;
}

Contributors

PersonTokensPropCommitsCommitProp
Benjamin Herrenschmidt24563.14%17.14%
Robert Jennings6215.98%17.14%
Anton Blanchard266.70%321.43%
Olof Johansson256.44%214.29%
Alexey Kardashevskiy82.06%17.14%
Mark Nelson61.55%17.14%
Michael Ellerman61.55%214.29%
Christoph Lameter41.03%17.14%
Ingo Molnar41.03%17.14%
Krzysztof Kozlowski20.52%17.14%
Total388100.00%14100.00%


/*
 * tce_free_pSeriesLP - clear npages TCE entries one at a time through
 * the hypervisor (H_PUT_TCE with a zero TCE value).  Failures are
 * rate-limit logged and the loop keeps going.
 */
static void tce_free_pSeriesLP(struct iommu_table *tbl, long tcenum, long npages)
{
	u64 rc;

	for (; npages > 0; npages--, tcenum++) {
		rc = plpar_tce_put((u64)tbl->it_index, (u64)tcenum << 12, 0);

		if (rc && printk_ratelimit()) {
			printk("tce_free_pSeriesLP: plpar_tce_put failed. rc=%lld\n", rc);
			printk("\tindex   = 0x%llx\n", (u64)tbl->it_index);
			printk("\ttcenum  = 0x%llx\n", (u64)tcenum);
			dump_stack();
		}
	}
}

Contributors

PersonTokensPropCommitsCommitProp
Benjamin Herrenschmidt8794.57%240.00%
Ingo Molnar33.26%120.00%
Anton Blanchard11.09%120.00%
Olof Johansson11.09%120.00%
Total92100.00%5100.00%


/*
 * tce_freemulti_pSeriesLP - clear a whole range of TCE entries with a
 * single plpar_tce_stuff() call (value 0), falling back to the
 * one-at-a-time path when the MULTITCE firmware feature is absent.
 */
static void tce_freemulti_pSeriesLP(struct iommu_table *tbl, long tcenum, long npages)
{
	u64 rc;

	if (!firmware_has_feature(FW_FEATURE_MULTITCE))
		return tce_free_pSeriesLP(tbl, tcenum, npages);

	rc = plpar_tce_stuff((u64)tbl->it_index, (u64)tcenum << 12, 0, npages);

	if (rc && printk_ratelimit()) {
		printk("tce_freemulti_pSeriesLP: plpar_tce_stuff failed\n");
		printk("\trc      = %lld\n", rc);
		printk("\tindex   = 0x%llx\n", (u64)tbl->it_index);
		printk("\tnpages  = 0x%llx\n", (u64)npages);
		dump_stack();
	}
}

Contributors

PersonTokensPropCommitsCommitProp
Benjamin Herrenschmidt8478.50%233.33%
Alexey Kardashevskiy1816.82%116.67%
Ingo Molnar32.80%116.67%
Anton Blanchard10.93%116.67%
Olof Johansson10.93%116.67%
Total107100.00%6100.00%


/*
 * tce_get_pSeriesLP - read back one TCE entry through the hypervisor
 * (plpar_tce_get).  Errors are rate-limit logged; note tce_ret is
 * returned even on failure, in which case its contents come from
 * whatever the failed hcall left behind.
 */
static unsigned long tce_get_pSeriesLP(struct iommu_table *tbl, long tcenum)
{
	u64 rc;
	unsigned long tce_ret;

	rc = plpar_tce_get((u64)tbl->it_index, (u64)tcenum << 12, &tce_ret);

	if (rc && printk_ratelimit()) {
		printk("tce_get_pSeriesLP: plpar_tce_get failed. rc=%lld\n", rc);
		printk("\tindex   = 0x%llx\n", (u64)tbl->it_index);
		printk("\ttcenum  = 0x%llx\n", (u64)tcenum);
		dump_stack();
	}

	return tce_ret;
}

Contributors

PersonTokensPropCommitsCommitProp
Haren Myneni8495.45%133.33%
Ingo Molnar33.41%133.33%
Anton Blanchard11.14%133.33%
Total88100.00%3100.00%

/* this is compatible with cells for the device tree property */
struct dynamic_dma_window_prop {
	__be32	liobn;		/* tce table number */
	__be64	dma_base;	/* address hi,lo */
	__be32	tce_shift;	/* ilog2(tce_page_size) */
	__be32	window_shift;	/* ilog2(tce_window_size) */
};

/* One entry on direct_window_list: a device node and its DDW property. */
struct direct_window {
	struct device_node *device;
	const struct dynamic_dma_window_prop *prop;
	struct list_head list;
};

/* Dynamic DMA Window support */
struct ddw_query_response {
	u32 windows_available;
	u32 largest_available_block;
	u32 page_size;
	u32 migration_capable;
};

struct ddw_create_response {
	u32 liobn;
	u32 addr_hi;
	u32 addr_lo;
};

static LIST_HEAD(direct_window_list);
/* prevents races between memory on/offline and window creation */
static DEFINE_SPINLOCK(direct_window_list_lock);
/* protects initializing window twice for same device */
static DEFINE_MUTEX(direct_window_init_mutex);
#define DIRECT64_PROPNAME "linux,direct64-ddr-window-info"
/*
 * tce_clearrange_multi_pSeriesLP - clear the TCEs covering a pfn range
 * inside a dynamic DMA window, batching up to 512 entries per
 * plpar_tce_stuff() call.  Returns the first hcall failure, or 0.
 */
static int tce_clearrange_multi_pSeriesLP(unsigned long start_pfn,
		unsigned long num_pfn, const void *arg)
{
	const struct dynamic_dma_window_prop *maprange = arg;
	int rc;
	u64 tce_size, num_tce, dma_offset, next;
	u32 tce_shift;
	long limit;

	tce_shift = be32_to_cpu(maprange->tce_shift);
	tce_size = 1ULL << tce_shift;
	next = start_pfn << PAGE_SHIFT;
	num_tce = num_pfn << PAGE_SHIFT;

	/* round back to the beginning of the tce page size */
	num_tce += next & (tce_size - 1);
	next &= ~(tce_size - 1);

	/* convert to number of tces */
	num_tce |= tce_size - 1;
	num_tce >>= tce_shift;

	do {
		/* Clear up to 512 entries per hcall (value 0 = unmapped). */
		limit = min_t(long, num_tce, 512);
		dma_offset = next + be64_to_cpu(maprange->dma_base);

		rc = plpar_tce_stuff((u64)be32_to_cpu(maprange->liobn),
					     dma_offset,
					     0, limit);
		next += limit * tce_size;
		num_tce -= limit;
	} while (num_tce > 0 && !rc);

	return rc;
}

Contributors

PersonTokensPropCommitsCommitProp
Nishanth Aravamudan13879.77%233.33%
Benjamin Herrenschmidt2816.18%233.33%
Anton Blanchard52.89%116.67%
Nathan T. Lynch21.16%116.67%
Total173100.00%6100.00%


/*
 * tce_setrange_multi_pSeriesLP - map a pfn range 1:1 (read+write) into a
 * dynamic DMA window, staging a pageful of TCEs at a time in the
 * per-CPU tce_page and handing them to plpar_tce_put_indirect().
 *
 * Returns 0 on success, -ENOMEM if the scratch page cannot be
 * allocated, or the first hcall failure status.  On error the caller
 * is expected to clear the whole range (see comment at the end).
 */
static int tce_setrange_multi_pSeriesLP(unsigned long start_pfn,
		unsigned long num_pfn, const void *arg)
{
	const struct dynamic_dma_window_prop *maprange = arg;
	u64 tce_size, num_tce, dma_offset, next, proto_tce, liobn;
	__be64 *tcep;
	u32 tce_shift;
	u64 rc = 0;
	long l, limit;

	local_irq_disable();	/* to protect tcep and the page behind it */
	tcep = __this_cpu_read(tce_page);

	if (!tcep) {
		tcep = (__be64 *)__get_free_page(GFP_ATOMIC);
		if (!tcep) {
			local_irq_enable();
			return -ENOMEM;
		}
		__this_cpu_write(tce_page, tcep);
	}

	proto_tce = TCE_PCI_READ | TCE_PCI_WRITE;

	liobn = (u64)be32_to_cpu(maprange->liobn);
	tce_shift = be32_to_cpu(maprange->tce_shift);
	tce_size = 1ULL << tce_shift;
	next = start_pfn << PAGE_SHIFT;
	num_tce = num_pfn << PAGE_SHIFT;

	/* round back to the beginning of the tce page size */
	num_tce += next & (tce_size - 1);
	next &= ~(tce_size - 1);

	/* convert to number of tces */
	num_tce |= tce_size - 1;
	num_tce >>= tce_shift;

	/* We can map max one pageful of TCEs at a time */
	do {
		/*
		 * Set up the page with TCE data, looping through and setting
		 * the values.
		 */
		limit = min_t(long, num_tce, 4096/TCE_ENTRY_SIZE);
		dma_offset = next + be64_to_cpu(maprange->dma_base);

		for (l = 0; l < limit; l++) {
			tcep[l] = cpu_to_be64(proto_tce | next);
			next += tce_size;
		}

		rc = plpar_tce_put_indirect(liobn,
					    dma_offset,
					    (u64)__pa(tcep),
					    limit);

		num_tce -= limit;
	} while (num_tce > 0 && !rc);

	/* error cleanup: caller will clear whole range */

	local_irq_enable();
	return rc;
}

Contributors

PersonTokensPropCommitsCommitProp
Nishanth Aravamudan27095.41%125.00%
Anton Blanchard82.83%125.00%
Christoph Lameter41.41%125.00%
Michael Ellerman10.35%125.00%
Total283100.00%4100.00%


/* Thin adapter so tce_setrange_multi_pSeriesLP can be used where a
 * non-const void *arg callback signature is required (presumably a
 * memory-range walker — confirm against callers). */
static int tce_setrange_multi_pSeriesLP_walk(unsigned long start_pfn,
		unsigned long num_pfn, void *arg)
{
	return tce_setrange_multi_pSeriesLP(start_pfn, num_pfn, arg);
}

Contributors

PersonTokensPropCommitsCommitProp
Nishanth Aravamudan28100.00%1100.00%
Total28100.00%1100.00%


/*
 * iommu_table_setparms - fill in an iommu_table for the non-LPAR case
 * from the PHB node's "linux,tce-base"/"linux,tce-size" properties,
 * carving this table's window out of the PHB's running window space
 * (dma_window_base_cur is advanced as a side effect).
 *
 * Panics if the cumulative windows would exceed 2GB of DMA space.
 */
static void iommu_table_setparms(struct pci_controller *phb,
				 struct device_node *dn,
				 struct iommu_table *tbl)
{
	struct device_node *node;
	const unsigned long *basep;
	const u32 *sizep;

	node = phb->dn;

	basep = of_get_property(node, "linux,tce-base", NULL);
	sizep = of_get_property(node, "linux,tce-size", NULL);
	if (basep == NULL || sizep == NULL) {
		printk(KERN_ERR "PCI_DMA: iommu_table_setparms: %pOF has "
				"missing tce entries !\n", dn);
		return;
	}

	tbl->it_base = (unsigned long)__va(*basep);

	/* In a kdump kernel the table may hold live mappings - don't wipe it. */
	if (!is_kdump_kernel())
		memset((void *)tbl->it_base, 0, *sizep);

	tbl->it_busno = phb->bus->number;
	tbl->it_page_shift = IOMMU_PAGE_SHIFT_4K;

	/* Units of tce entries */
	tbl->it_offset = phb->dma_window_base_cur >> tbl->it_page_shift;

	/* Test if we are going over 2GB of DMA space */
	if (phb->dma_window_base_cur + phb->dma_window_size > 0x80000000ul) {
		udbg_printf("PCI_DMA: Unexpected number of IOAs under this PHB.\n");
		panic("PCI_DMA: Unexpected number of IOAs under this PHB.\n");
	}

	phb->dma_window_base_cur += phb->dma_window_size;

	/* Set the tce table size - measured in entries */
	tbl->it_size = phb->dma_window_size >> tbl->it_page_shift;

	tbl->it_index = 0;
	tbl->it_blocksize = 16;
	tbl->it_type = TCE_PCI;
}

Contributors

PersonTokensPropCommitsCommitProp
Nishanth Aravamudan20393.98%133.33%
Alistair Popple125.56%133.33%
Rob Herring10.46%133.33%
Total216100.00%3100.00%

/*
 * iommu_table_setparms_lpar
 *
 * Function: On pSeries LPAR systems, return TCE table info, given a pci bus.
 */
/*
 * Fill in an iommu_table for the LPAR case from the "ibm,dma-window"
 * style property: the window's liobn/offset/size are decoded with
 * of_parse_dma_window(), and the 32-bit window start/size are also
 * recorded on the table group.  it_base stays 0 - the table lives in
 * the hypervisor and is only reachable via hcalls.
 */
static void iommu_table_setparms_lpar(struct pci_controller *phb,
				      struct device_node *dn,
				      struct iommu_table *tbl,
				      struct iommu_table_group *table_group,
				      const __be32 *dma_window)
{
	unsigned long offset, size;

	of_parse_dma_window(dn, dma_window, &tbl->it_index, &offset, &size);

	tbl->it_busno = phb->bus->number;
	tbl->it_page_shift = IOMMU_PAGE_SHIFT_4K;
	tbl->it_base   = 0;
	tbl->it_blocksize  = 16;
	tbl->it_type = TCE_PCI;
	tbl->it_offset = offset >> tbl->it_page_shift;
	tbl->it_size = size >> tbl->it_page_shift;

	table_group->tce32_start = offset;
	table_group->tce32_size = size;
}

Contributors

PersonTokensPropCommitsCommitProp
Nishanth Aravamudan9175.21%125.00%
Alexey Kardashevskiy1714.05%125.00%
Alistair Popple129.92%125.00%
Anton Blanchard10.83%125.00%
Total121100.00%4100.00%

/* Table ops for the non-LPAR case: entries written directly via it_base. */
struct iommu_table_ops iommu_table_pseries_ops = {
	.set = tce_build_pSeries,
	.clear = tce_free_pSeries,
	.get = tce_get_pseries
};
/*
 * pci_dma_bus_setup_pSeries - set up DMA windows for a root PCI bus on
 * non-LPAR pSeries.  Splits the available DMA space among the PHB's
 * direct children, reserving space for an ISA/IDE table when an ISA
 * bus lives under this PHB.  Non-root buses are skipped (their setup
 * happens on the device side of the bridge).
 */
static void pci_dma_bus_setup_pSeries(struct pci_bus *bus)
{
	struct device_node *dn;
	struct iommu_table *tbl;
	struct device_node *isa_dn, *isa_dn_orig;
	struct device_node *tmp;
	struct pci_dn *pci;
	int children;

	dn = pci_bus_to_OF_node(bus);

	pr_debug("pci_dma_bus_setup_pSeries: setting up bus %pOF\n", dn);

	if (bus->self) {
		/* This is not a root bus, any setup will be done for the
		 * device-side of the bridge in iommu_dev_setup_pSeries().
		 */
		return;
	}
	pci = PCI_DN(dn);

	/* Check if the ISA bus on the system is under
	 * this PHB.
	 */
	isa_dn = isa_dn_orig = of_find_node_by_type(NULL, "isa");

	while (isa_dn && isa_dn != dn)
		isa_dn = isa_dn->parent;

	of_node_put(isa_dn_orig);

	/* Count number of direct PCI children of the PHB. */
	for (children = 0, tmp = dn->child; tmp; tmp = tmp->sibling)
		children++;

	pr_debug("Children: %d\n", children);

	/* Calculate amount of DMA window per slot. Each window must be
	 * a power of two (due to pci_alloc_consistent requirements).
	 *
	 * Keep 256MB aside for PHBs with ISA.
	 */
	if (!isa_dn) {
		/* No ISA/IDE - just set window size and return */
		pci->phb->dma_window_size = 0x80000000ul; /* To be divided */

		while (pci->phb->dma_window_size * children > 0x80000000ul)
			pci->phb->dma_window_size >>= 1;
		pr_debug("No ISA/IDE, window size is 0x%llx\n",
			 pci->phb->dma_window_size);
		pci->phb->dma_window_base_cur = 0;

		return;
	}

	/* If we have ISA, then we probably have an IDE
	 * controller too. Allocate a 128MB table but
	 * skip the first 128MB to avoid stepping on ISA
	 * space.
	 */
	pci->phb->dma_window_size = 0x8000000ul;
	pci->phb->dma_window_base_cur = 0x8000000ul;

	pci->table_group = iommu_pseries_alloc_group(pci->phb->node);
	tbl = pci->table_group->tables[0];

	iommu_table_setparms(pci->phb, dn, tbl);
	tbl->it_ops = &iommu_table_pseries_ops;
	iommu_init_table(tbl, pci->phb->node);
	iommu_register_group(pci->table_group, pci_domain_nr(bus), 0);

	/* Divide the rest (1.75GB) among the children */
	pci->phb->dma_window_size = 0x80000000ul;
	while (pci->phb->dma_window_size * children > 0x70000000ul)
		pci->phb->dma_window_size >>= 1;

	pr_debug("ISA/IDE, window size is 0x%llx\n", pci->phb->dma_window_size);
}

Contributors

PersonTokensPropCommitsCommitProp
Nishanth Aravamudan28088.33%120.00%
Alexey Kardashevskiy3611.36%360.00%
Rob Herring10.32%120.00%
Total317100.00%5100.00%

#ifdef CONFIG_IOMMU_API
/*
 * tce_exchange_pseries - atomically (under the large pool lock) swap the
 * TCE at @index: read the old entry, write the new one built from *tce
 * and *direction, and on success return the old entry's address (minus
 * permission bits) and direction through the in/out parameters.
 */
static int tce_exchange_pseries(struct iommu_table *tbl, long index, unsigned
				long *tce, enum dma_data_direction *direction)
{
	long rc;
	unsigned long ioba = (unsigned long) index << tbl->it_page_shift;
	unsigned long flags, oldtce = 0;
	u64 proto_tce = iommu_direction_to_tce_perm(*direction);
	unsigned long newtce = *tce | proto_tce;

	/* Serialize get+put so concurrent exchanges can't interleave. */
	spin_lock_irqsave(&tbl->large_pool.lock, flags);

	rc = plpar_tce_get((u64)tbl->it_index, ioba, &oldtce);
	if (!rc)
		rc = plpar_tce_put((u64)tbl->it_index, ioba, newtce);

	if (!rc) {
		*direction = iommu_tce_direction(oldtce);
		/* Strip permission bits, leaving the old real address. */
		*tce = oldtce & ~(TCE_PCI_READ | TCE_PCI_WRITE);
	}

	spin_unlock_irqrestore(&tbl->large_pool.lock, flags);

	return rc;
}

Contributors

PersonTokensPropCommitsCommitProp
Alexey Kardashevskiy159100.00%1100.00%
Total159100.00%1100.00%

#endif struct iommu_table_ops iommu_table_lpar_multi_ops