cregit-Linux: how code gets into the kernel

Release 4.17 drivers/gpu/drm/i915/i915_gem_stolen.c

/*
 * Copyright © 2008-2012 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Chris Wilson <chris@chris-wilson.co.uk>
 *
 */

#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"

/*
 * The BIOS typically reserves some of the system's memory for the exclusive
 * use of the integrated graphics. This memory is no longer available for
 * use by the OS and so the user finds that their system has less memory
 * available than was installed. We refer to this memory as stolen.
 *
 * The BIOS will allocate its framebuffer from the stolen memory. Our
 * goal is to try to reuse that object for our own fbcon, which must always
 * be available for panics. Anything else we can reuse the stolen memory
 * for is a boon.
 */


int i915_gem_stolen_insert_node_in_range(struct drm_i915_private *dev_priv,
					 struct drm_mm_node *node, u64 size,
					 unsigned alignment, u64 start, u64 end)
{
	int ret;

	if (!drm_mm_initialized(&dev_priv->mm.stolen))
		return -ENODEV;

	mutex_lock(&dev_priv->mm.stolen_lock);
	ret = drm_mm_insert_node_in_range(&dev_priv->mm.stolen, node,
					  size, alignment, 0,
					  start, end, DRM_MM_INSERT_BEST);
	mutex_unlock(&dev_priv->mm.stolen_lock);

	return ret;
}

Contributors

Person                  Tokens   Token %   Commits   Commit %
Paulo Zanoni                93    96.88%         3     75.00%
Chris Wilson                 3     3.12%         1     25.00%
Total                       96   100.00%         4    100.00%
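
For context, a minimal sketch of how a caller might pair the insert and remove helpers above. The function names reserve_stolen_chunk() and release_stolen_chunk() are hypothetical, kernel context is assumed, and the 4096-byte alignment mirrors i915_gem_object_create_stolen() further down in this file.

/* Hypothetical caller, for illustration only: track a chunk of stolen
 * memory with a drm_mm_node, using only the helpers from this file. */
static struct drm_mm_node *
reserve_stolen_chunk(struct drm_i915_private *i915, u64 size)
{
	struct drm_mm_node *node;
	int err;

	node = kzalloc(sizeof(*node), GFP_KERNEL);
	if (!node)
		return ERR_PTR(-ENOMEM);

	/* 4096-byte alignment matches i915_gem_object_create_stolen() */
	err = i915_gem_stolen_insert_node(i915, node, size, 4096);
	if (err) {
		kfree(node);
		return ERR_PTR(err);
	}

	return node;
}

/* Teardown mirrors the allocation: remove the node, then free it. */
static void
release_stolen_chunk(struct drm_i915_private *i915, struct drm_mm_node *node)
{
	i915_gem_stolen_remove_node(i915, node);
	kfree(node);
}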


int i915_gem_stolen_insert_node(struct drm_i915_private *dev_priv,
				struct drm_mm_node *node, u64 size,
				unsigned alignment)
{
	return i915_gem_stolen_insert_node_in_range(dev_priv, node, size,
						    alignment, 0, U64_MAX);
}

Contributors

Person                  Tokens   Token %   Commits   Commit %
Paulo Zanoni                37   100.00%         2    100.00%
Total                       37   100.00%         2    100.00%


void i915_gem_stolen_remove_node(struct drm_i915_private *dev_priv,
				 struct drm_mm_node *node)
{
	mutex_lock(&dev_priv->mm.stolen_lock);
	drm_mm_remove_node(node);
	mutex_unlock(&dev_priv->mm.stolen_lock);
}

Contributors

Person                  Tokens   Token %   Commits   Commit %
Paulo Zanoni                40   100.00%         2    100.00%
Total                       40   100.00%         2    100.00%


static int i915_adjust_stolen(struct drm_i915_private *dev_priv,
			      struct resource *dsm)
{
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	struct resource *r;

	if (dsm->start == 0 || dsm->end <= dsm->start)
		return -EINVAL;

	/*
	 * TODO: We have yet to encounter the case where the GTT wasn't at the
	 * end of stolen. With that assumption we could simplify this.
	 */

	/* Make sure we don't clobber the GTT if it's within stolen memory */
	if (INTEL_GEN(dev_priv) <= 4 &&
	    !IS_G33(dev_priv) && !IS_PINEVIEW(dev_priv) && !IS_G4X(dev_priv)) {
		struct resource stolen[2] = {*dsm, *dsm};
		struct resource ggtt_res;
		resource_size_t ggtt_start;

		ggtt_start = I915_READ(PGTBL_CTL);
		if (IS_GEN4(dev_priv))
			ggtt_start = (ggtt_start & PGTBL_ADDRESS_LO_MASK) |
				     (ggtt_start & PGTBL_ADDRESS_HI_MASK) << 28;
		else
			ggtt_start &= PGTBL_ADDRESS_LO_MASK;

		ggtt_res =
			(struct resource) DEFINE_RES_MEM(ggtt_start,
							 ggtt_total_entries(ggtt) * 4);

		if (ggtt_res.start >= stolen[0].start && ggtt_res.start < stolen[0].end)
			stolen[0].end = ggtt_res.start;
		if (ggtt_res.end > stolen[1].start && ggtt_res.end <= stolen[1].end)
			stolen[1].start = ggtt_res.end;

		/* Pick the larger of the two chunks */
		if (resource_size(&stolen[0]) > resource_size(&stolen[1]))
			*dsm = stolen[0];
		else
			*dsm = stolen[1];

		if (stolen[0].start != stolen[1].start ||
		    stolen[0].end != stolen[1].end) {
			DRM_DEBUG_KMS("GTT within stolen memory at %pR\n", &ggtt_res);
			DRM_DEBUG_KMS("Stolen memory adjusted to %pR\n", dsm);
		}
	}

	/*
	 * Verify that nothing else uses this physical address. Stolen
	 * memory should be reserved by the BIOS and hidden from the
	 * kernel. So if the region is already marked as busy, something
	 * is seriously wrong.
	 */
	r = devm_request_mem_region(dev_priv->drm.dev, dsm->start,
				    resource_size(dsm),
				    "Graphics Stolen Memory");
	if (r == NULL) {
		/*
		 * One more attempt but this time requesting region from
		 * start + 1, as we have seen that this resolves the region
		 * conflict with the PCI Bus.
		 * This is a BIOS w/a: Some BIOS wrap stolen in the root
		 * PCI bus, but have an off-by-one error. Hence retry the
		 * reservation starting from 1 instead of 0.
		 * There's also BIOS with off-by-one on the other end.
		 */
		r = devm_request_mem_region(dev_priv->drm.dev, dsm->start + 1,
					    resource_size(dsm) - 2,
					    "Graphics Stolen Memory");
		/*
		 * GEN3 firmware likes to smash pci bridges into the stolen
		 * range. Apparently this works.
		 */
		if (r == NULL && !IS_GEN3(dev_priv)) {
			DRM_ERROR("conflict detected with stolen region: %pR\n",
				  dsm);
			return -EBUSY;
		}
	}

	return 0;
}

Contributors

Person                  Tokens   Token %   Commits   Commit %
Ville Syrjälä              184    46.12%         2     11.11%
Matthew Auld               105    26.32%         3     16.67%
Chris Wilson                39     9.77%         3     16.67%
Akash Goel                  23     5.76%         1      5.56%
Joonas Lahtinen             18     4.51%         2     11.11%
Tvrtko A. Ursulin           14     3.51%         3     16.67%
Daniel Vetter                7     1.75%         2     11.11%
Jani Nikula                  6     1.50%         1      5.56%
Paulo Zanoni                 3     0.75%         1      5.56%
Total                      399   100.00%        18    100.00%
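
The interesting step above is trimming the GTT range out of stolen and keeping the larger remainder. Below is a self-contained model of just that step; it simplifies to fully inclusive [start, end] bounds (the kernel's struct resource boundary handling differs slightly), and all names are illustrative.

#include <stdio.h>
#include <stdint.h>

/* Simplified stand-in for struct resource: [start, end], both inclusive. */
struct range { uint64_t start, end; };

static uint64_t range_size(const struct range *r)
{
	return r->end - r->start + 1;
}

/* Trim 'hole' out of 'dsm' and keep the larger remaining chunk,
 * mirroring the stolen[0]/stolen[1] logic in i915_adjust_stolen(). */
static struct range pick_larger_chunk(struct range dsm, struct range hole)
{
	struct range below = dsm, above = dsm;

	if (hole.start >= below.start && hole.start <= below.end)
		below.end = hole.start - 1;	/* chunk below the hole */
	if (hole.end >= above.start && hole.end <= above.end)
		above.start = hole.end + 1;	/* chunk above the hole */

	return range_size(&below) > range_size(&above) ? below : above;
}

int main(void)
{
	struct range dsm = { 0x100000, 0x1fffff };	/* 1 MiB of stolen */
	struct range gtt = { 0x180000, 0x18ffff };	/* GTT in the middle */
	struct range r = pick_larger_chunk(dsm, gtt);

	printf("kept [%#llx, %#llx]\n",
	       (unsigned long long)r.start, (unsigned long long)r.end);
	return 0;
}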


void i915_gem_cleanup_stolen(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	if (!drm_mm_initialized(&dev_priv->mm.stolen))
		return;

	drm_mm_takedown(&dev_priv->mm.stolen);
}

Contributors

Person                  Tokens   Token %   Commits   Commit %
Daniel Vetter               31    70.45%         2     50.00%
Chris Wilson                13    29.55%         2     50.00%
Total                       44   100.00%         4    100.00%


static void g4x_get_stolen_reserved(struct drm_i915_private *dev_priv,
				    resource_size_t *base, resource_size_t *size)
{
	uint32_t reg_val = I915_READ(IS_GM45(dev_priv) ?
				     CTG_STOLEN_RESERVED :
				     ELK_STOLEN_RESERVED);
	resource_size_t stolen_top = dev_priv->dsm.end + 1;

	if ((reg_val & G4X_STOLEN_RESERVED_ENABLE) == 0) {
		*base = 0;
		*size = 0;
		return;
	}

	/*
	 * Whether ILK really reuses the ELK register for this is unclear.
	 * Let's see if we catch anyone with this supposedly enabled on ILK.
	 */
	WARN(IS_GEN5(dev_priv), "ILK stolen reserved found? 0x%08x\n", reg_val);

	*base = (reg_val & G4X_STOLEN_RESERVED_ADDR2_MASK) << 16;

	WARN_ON((reg_val & G4X_STOLEN_RESERVED_ADDR1_MASK) < *base);

	/* On these platforms, the register doesn't have a size field, so the
	 * size is the distance between the base and the top of the stolen
	 * memory. We also have the genuine case where base is zero and there's
	 * nothing reserved. */
	if (*base == 0)
		*size = 0;
	else
		*size = stolen_top - *base;
}

Contributors

Person                  Tokens   Token %   Commits   Commit %
Ville Syrjälä              120    95.24%         3     60.00%
Matthew Auld                 6     4.76%         2     40.00%
Total                      126   100.00%         5    100.00%


static void gen6_get_stolen_reserved(struct drm_i915_private *dev_priv,
				     resource_size_t *base, resource_size_t *size)
{
	uint32_t reg_val = I915_READ(GEN6_STOLEN_RESERVED);

	if ((reg_val & GEN6_STOLEN_RESERVED_ENABLE) == 0) {
		*base = 0;
		*size = 0;
		return;
	}

	*base = reg_val & GEN6_STOLEN_RESERVED_ADDR_MASK;

	switch (reg_val & GEN6_STOLEN_RESERVED_SIZE_MASK) {
	case GEN6_STOLEN_RESERVED_1M:
		*size = 1024 * 1024;
		break;
	case GEN6_STOLEN_RESERVED_512K:
		*size = 512 * 1024;
		break;
	case GEN6_STOLEN_RESERVED_256K:
		*size = 256 * 1024;
		break;
	case GEN6_STOLEN_RESERVED_128K:
		*size = 128 * 1024;
		break;
	default:
		*size = 1024 * 1024;
		MISSING_CASE(reg_val & GEN6_STOLEN_RESERVED_SIZE_MASK);
	}
}

Contributors

Person                  Tokens   Token %   Commits   Commit %
Paulo Zanoni                99    79.84%         1     33.33%
Ville Syrjälä               23    18.55%         1     33.33%
Matthew Auld                 2     1.61%         1     33.33%
Total                      124   100.00%         3    100.00%
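
The decode pattern above — mask off the size field, map each encoding to a byte count, and flag unknown values — can be exercised outside the kernel. The mask and encoding values below are placeholders (the real ones live in i915_reg.h); only the pattern is the point.

#include <stdio.h>
#include <stdint.h>

/* Placeholder encodings for the demo -- the real definitions are in
 * i915_reg.h. A 2-bit field at bits 5:4 selects the reserved size. */
#define DEMO_RESERVED_ENABLE    (1u << 0)
#define DEMO_RESERVED_SIZE_MASK (3u << 4)
#define DEMO_RESERVED_1M        (0u << 4)
#define DEMO_RESERVED_512K      (1u << 4)
#define DEMO_RESERVED_256K      (2u << 4)
#define DEMO_RESERVED_128K      (3u << 4)

static uint64_t decode_reserved_size(uint32_t reg_val)
{
	if (!(reg_val & DEMO_RESERVED_ENABLE))
		return 0;	/* nothing reserved */

	switch (reg_val & DEMO_RESERVED_SIZE_MASK) {
	case DEMO_RESERVED_1M:   return 1024 * 1024;
	case DEMO_RESERVED_512K: return 512 * 1024;
	case DEMO_RESERVED_256K: return 256 * 1024;
	case DEMO_RESERVED_128K: return 128 * 1024;
	default:                 return 1024 * 1024; /* unreachable: 2-bit field */
	}
}

int main(void)
{
	uint32_t reg_val = DEMO_RESERVED_ENABLE | DEMO_RESERVED_512K;

	printf("reserved size: %llu KiB\n",
	       (unsigned long long)(decode_reserved_size(reg_val) >> 10));
	return 0;
}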


static void gen7_get_stolen_reserved(struct drm_i915_private *dev_priv,
				     resource_size_t *base, resource_size_t *size)
{
	uint32_t reg_val = I915_READ(GEN6_STOLEN_RESERVED);

	if ((reg_val & GEN6_STOLEN_RESERVED_ENABLE) == 0) {
		*base = 0;
		*size = 0;
		return;
	}

	*base = reg_val & GEN7_STOLEN_RESERVED_ADDR_MASK;

	switch (reg_val & GEN7_STOLEN_RESERVED_SIZE_MASK) {
	case GEN7_STOLEN_RESERVED_1M:
		*size = 1024 * 1024;
		break;
	case GEN7_STOLEN_RESERVED_256K:
		*size = 256 * 1024;
		break;
	default:
		*size = 1024 * 1024;
		MISSING_CASE(reg_val & GEN7_STOLEN_RESERVED_SIZE_MASK);
	}
}

Contributors

Person                  Tokens   Token %   Commits   Commit %
Paulo Zanoni                77    75.49%         1     33.33%
Ville Syrjälä               23    22.55%         1     33.33%
Matthew Auld                 2     1.96%         1     33.33%
Total                      102   100.00%         3    100.00%


static void chv_get_stolen_reserved(struct drm_i915_private *dev_priv,
				    resource_size_t *base, resource_size_t *size)
{
	uint32_t reg_val = I915_READ(GEN6_STOLEN_RESERVED);

	if ((reg_val & GEN6_STOLEN_RESERVED_ENABLE) == 0) {
		*base = 0;
		*size = 0;
		return;
	}

	*base = reg_val & GEN6_STOLEN_RESERVED_ADDR_MASK;

	switch (reg_val & GEN8_STOLEN_RESERVED_SIZE_MASK) {
	case GEN8_STOLEN_RESERVED_1M:
		*size = 1024 * 1024;
		break;
	case GEN8_STOLEN_RESERVED_2M:
		*size = 2 * 1024 * 1024;
		break;
	case GEN8_STOLEN_RESERVED_4M:
		*size = 4 * 1024 * 1024;
		break;
	case GEN8_STOLEN_RESERVED_8M:
		*size = 8 * 1024 * 1024;
		break;
	default:
		*size = 8 * 1024 * 1024;
		MISSING_CASE(reg_val & GEN8_STOLEN_RESERVED_SIZE_MASK);
	}
}

Contributors

Person                  Tokens   Token %   Commits   Commit %
Paulo Zanoni               106    80.30%         1     25.00%
Ville Syrjälä               23    17.42%         1     25.00%
Matthew Auld                 2     1.52%         1     25.00%
Rodrigo Vivi                 1     0.76%         1     25.00%
Total                      132   100.00%         4    100.00%


static void bdw_get_stolen_reserved(struct drm_i915_private *dev_priv,
				    resource_size_t *base, resource_size_t *size)
{
	uint32_t reg_val = I915_READ(GEN6_STOLEN_RESERVED);
	resource_size_t stolen_top;

	if ((reg_val & GEN6_STOLEN_RESERVED_ENABLE) == 0) {
		*base = 0;
		*size = 0;
		return;
	}

	stolen_top = dev_priv->dsm.end + 1;

	*base = reg_val & GEN6_STOLEN_RESERVED_ADDR_MASK;

	/* On these platforms, the register doesn't have a size field, so the
	 * size is the distance between the base and the top of the stolen
	 * memory. We also have the genuine case where base is zero and there's
	 * nothing reserved. */
	if (*base == 0)
		*size = 0;
	else
		*size = stolen_top - *base;
}

Contributors

Person                  Tokens   Token %   Commits   Commit %
Paulo Zanoni                63    68.48%         1     25.00%
Ville Syrjälä               23    25.00%         1     25.00%
Matthew Auld                 6     6.52%         2     50.00%
Total                       92   100.00%         4    100.00%


int i915_gem_init_stolen(struct drm_i915_private *dev_priv)
{
	resource_size_t reserved_base, stolen_top;
	resource_size_t reserved_total, reserved_size;
	resource_size_t stolen_usable_start;

	mutex_init(&dev_priv->mm.stolen_lock);

	if (intel_vgpu_active(dev_priv)) {
		DRM_INFO("iGVT-g active, disabling use of stolen memory\n");
		return 0;
	}

	if (intel_vtd_active() && INTEL_GEN(dev_priv) < 8) {
		DRM_INFO("DMAR active, disabling use of stolen memory\n");
		return 0;
	}

	if (resource_size(&intel_graphics_stolen_res) == 0)
		return 0;

	dev_priv->dsm = intel_graphics_stolen_res;

	if (i915_adjust_stolen(dev_priv, &dev_priv->dsm))
		return 0;

	GEM_BUG_ON(dev_priv->dsm.start == 0);
	GEM_BUG_ON(dev_priv->dsm.end <= dev_priv->dsm.start);

	stolen_top = dev_priv->dsm.end + 1;
	reserved_base = 0;
	reserved_size = 0;

	switch (INTEL_GEN(dev_priv)) {
	case 2:
	case 3:
		break;
	case 4:
		if (!IS_G4X(dev_priv))
			break;
		/* fall through */
	case 5:
		g4x_get_stolen_reserved(dev_priv,
					&reserved_base, &reserved_size);
		break;
	case 6:
		gen6_get_stolen_reserved(dev_priv,
					 &reserved_base, &reserved_size);
		break;
	case 7:
		gen7_get_stolen_reserved(dev_priv,
					 &reserved_base, &reserved_size);
		break;
	default:
		if (IS_LP(dev_priv))
			chv_get_stolen_reserved(dev_priv,
						&reserved_base, &reserved_size);
		else
			bdw_get_stolen_reserved(dev_priv,
						&reserved_base, &reserved_size);
		break;
	}

	/* It is possible for the reserved base to be zero, but the register
	 * field for size doesn't have a zero option. */
	if (reserved_base == 0) {
		reserved_size = 0;
		reserved_base = stolen_top;
	}

	dev_priv->dsm_reserved =
		(struct resource) DEFINE_RES_MEM(reserved_base, reserved_size);

	if (!resource_contains(&dev_priv->dsm, &dev_priv->dsm_reserved)) {
		DRM_ERROR("Stolen reserved area %pR outside stolen memory %pR\n",
			  &dev_priv->dsm_reserved, &dev_priv->dsm);
		return 0;
	}

	/* It is possible for the reserved area to end before the end of stolen
	 * memory, so just consider the start. */
	reserved_total = stolen_top - reserved_base;

	DRM_DEBUG_KMS("Memory reserved for graphics device: %lluK, usable: %lluK\n",
		      (u64)resource_size(&dev_priv->dsm) >> 10,
		      ((u64)resource_size(&dev_priv->dsm) - reserved_total) >> 10);

	stolen_usable_start = 0;
	/* WaSkipStolenMemoryFirstPage:bdw+ */
	if (INTEL_GEN(dev_priv) >= 8)
		stolen_usable_start = 4096;

	dev_priv->stolen_usable_size =
		resource_size(&dev_priv->dsm) - reserved_total - stolen_usable_start;

	/* Basic memrange allocator for stolen space. */
	drm_mm_init(&dev_priv->mm.stolen, stolen_usable_start,
		    dev_priv->stolen_usable_size);

	return 0;
}

Contributors

Person                  Tokens   Token %   Commits   Commit %
Paulo Zanoni               144    34.29%         4     12.90%
Matthew Auld               104    24.76%         4     12.90%
Chris Wilson                92    21.90%         7     22.58%
Daniel Vetter               31     7.38%         3      9.68%
Ville Syrjälä               26     6.19%         4     12.90%
Tvrtko A. Ursulin            7     1.67%         4     12.90%
Rodrigo Vivi                 5     1.19%         2      6.45%
Jesse Barnes                 5     1.19%         1      3.23%
Joonas Lahtinen              5     1.19%         1      3.23%
Ben Widawsky                 1     0.24%         1      3.23%
Total                      420   100.00%        31    100.00%


static struct sg_table *
i915_pages_create_for_stolen(struct drm_device *dev,
			     resource_size_t offset, resource_size_t size)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct sg_table *st;
	struct scatterlist *sg;

	GEM_BUG_ON(range_overflows(offset, size, resource_size(&dev_priv->dsm)));

	/* We hide that we have no struct page backing our stolen object
	 * by wrapping the contiguous physical allocation with a fake
	 * dma mapping in a single scatterlist.
	 */
	st = kmalloc(sizeof(*st), GFP_KERNEL);
	if (st == NULL)
		return ERR_PTR(-ENOMEM);

	if (sg_alloc_table(st, 1, GFP_KERNEL)) {
		kfree(st);
		return ERR_PTR(-ENOMEM);
	}

	sg = st->sgl;
	sg->offset = 0;
	sg->length = size;

	sg_dma_address(sg) = (dma_addr_t)dev_priv->dsm.start + offset;
	sg_dma_len(sg) = size;

	return st;
}

Contributors

Person                  Tokens   Token %   Commits   Commit %
Chris Wilson               118    77.12%         4     36.36%
Matthew Auld                20    13.07%         3     27.27%
Imre Deak                    8     5.23%         2     18.18%
Joonas Lahtinen              6     3.92%         1      9.09%
Akash Goel                   1     0.65%         1      9.09%
Total                      153   100.00%        11    100.00%
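
To see what the single-entry table buys, here is a hedged sketch (kernel context assumed; dump_stolen_sg() is a hypothetical debug helper, not part of this file) of walking such a table: a stolen object always reports exactly one contiguous DMA segment.

#include <linux/printk.h>
#include <linux/scatterlist.h>

/* Hypothetical debug helper: the table built above always yields a
 * single segment covering the whole stolen allocation. */
static void dump_stolen_sg(struct sg_table *st)
{
	struct scatterlist *sg;
	unsigned int i;

	for_each_sg(st->sgl, sg, st->nents, i)
		pr_info("segment %u: dma %pad + %u bytes\n",
			i, &sg_dma_address(sg), sg_dma_len(sg));
}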


static int i915_gem_object_get_pages_stolen(struct drm_i915_gem_object *obj)
{
	struct sg_table *pages =
		i915_pages_create_for_stolen(obj->base.dev,
					     obj->stolen->start,
					     obj->stolen->size);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	__i915_gem_object_set_pages(obj, pages, obj->stolen->size);

	return 0;
}

Contributors

Person                  Tokens   Token %   Commits   Commit %
Matthew Auld                36    54.55%         2     50.00%
Chris Wilson                30    45.45%         2     50.00%
Total                       66   100.00%         4    100.00%


static void i915_gem_object_put_pages_stolen(struct drm_i915_gem_object *obj,
					     struct sg_table *pages)
{
	/* Should only be called from i915_gem_object_release_stolen() */
	sg_free_table(pages);
	kfree(pages);
}

Contributors

Person                  Tokens   Token %   Commits   Commit %
Chris Wilson                27   100.00%         3    100.00%
Total                       27   100.00%         3    100.00%


static void
i915_gem_object_release_stolen(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
	struct drm_mm_node *stolen = fetch_and_zero(&obj->stolen);

	GEM_BUG_ON(!stolen);

	__i915_gem_object_unpin_pages(obj);

	i915_gem_stolen_remove_node(dev_priv, stolen);
	kfree(stolen);
}

Contributors

Person                  Tokens   Token %   Commits   Commit %
Chris Wilson                47    77.05%         4     80.00%
Paulo Zanoni                14    22.95%         1     20.00%
Total                       61   100.00%         5    100.00%

static const struct drm_i915_gem_object_ops i915_gem_object_stolen_ops = {
	.get_pages = i915_gem_object_get_pages_stolen,
	.put_pages = i915_gem_object_put_pages_stolen,
	.release = i915_gem_object_release_stolen,
};

static struct drm_i915_gem_object *
_i915_gem_object_create_stolen(struct drm_i915_private *dev_priv,
			       struct drm_mm_node *stolen)
{
	struct drm_i915_gem_object *obj;
	unsigned int cache_level;

	obj = i915_gem_object_alloc(dev_priv);
	if (obj == NULL)
		return NULL;

	drm_gem_private_object_init(&dev_priv->drm, &obj->base, stolen->size);
	i915_gem_object_init(obj, &i915_gem_object_stolen_ops);

	obj->stolen = stolen;
	obj->read_domains = I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT;
	cache_level = HAS_LLC(dev_priv) ? I915_CACHE_LLC : I915_CACHE_NONE;
	i915_gem_object_set_cache_coherency(obj, cache_level);

	if (i915_gem_object_pin_pages(obj))
		goto cleanup;

	return obj;

cleanup:
	i915_gem_object_free(obj);
	return NULL;
}

Contributors

Person                  Tokens   Token %   Commits   Commit %
Chris Wilson               113    91.87%         6     66.67%
Tvrtko A. Ursulin            9     7.32%         2     22.22%
David Herrmann               1     0.81%         1     11.11%
Total                      123   100.00%         9    100.00%


struct drm_i915_gem_object *
i915_gem_object_create_stolen(struct drm_i915_private *dev_priv,
			      resource_size_t size)
{
	struct drm_i915_gem_object *obj;
	struct drm_mm_node *stolen;
	int ret;

	if (!drm_mm_initialized(&dev_priv->mm.stolen))
		return NULL;

	if (size == 0)
		return NULL;

	stolen = kzalloc(sizeof(*stolen), GFP_KERNEL);
	if (!stolen)
		return NULL;

	ret = i915_gem_stolen_insert_node(dev_priv, stolen, size, 4096);
	if (ret) {
		kfree(stolen);
		return NULL;
	}

	obj = _i915_gem_object_create_stolen(dev_priv, stolen);
	if (obj)
		return obj;

	i915_gem_stolen_remove_node(dev_priv, stolen);
	kfree(stolen);
	return NULL;
}

Contributors

Person                  Tokens   Token %   Commits   Commit %
Chris Wilson                83    62.88%         1     16.67%
David Herrmann              35    26.52%         1     16.67%
Daniel Vetter                6     4.55%         1     16.67%
Paulo Zanoni                 4     3.03%         1     16.67%
Tvrtko A. Ursulin            3     2.27%         1     16.67%
Matthew Auld                 1     0.76%         1     16.67%
Total                      132   100.00%         6    100.00%
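
A minimal usage sketch for the constructor above (use_stolen_scratch() is a hypothetical caller; kernel context assumed). Note the NULL-on-failure convention visible in the code: unlike many GEM constructors, this one never returns an ERR_PTR.

/* Hypothetical caller: carve a 1 MiB scratch buffer out of stolen
 * memory, bailing out when stolen is disabled (vGPU, DMAR) or full. */
static int use_stolen_scratch(struct drm_i915_private *i915)
{
	struct drm_i915_gem_object *obj;

	obj = i915_gem_object_create_stolen(i915, SZ_1M);
	if (!obj)
		return -ENOMEM;

	/* ... the object is backed by the contiguous BIOS-reserved
	 * range, not by struct pages; use it accordingly ... */

	i915_gem_object_put(obj);
	return 0;
}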


struct drm_i915_gem_object *
i915_gem_object_create_stolen_for_preallocated(struct drm_i915_private *dev_priv,
					       resource_size_t stolen_offset,
					       resource_size_t gtt_offset,
					       resource_size_t size)
{
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	struct drm_i915_gem_object *obj;
	struct drm_mm_node *stolen;
	struct i915_vma *vma;
	int ret;

	if (!drm_mm_initialized(&dev_priv->mm.stolen))
		return NULL;

	lockdep_assert_held(&dev_priv->drm.struct_mutex);

	DRM_DEBUG_KMS("creating preallocated stolen object: stolen_offset=%pa, gtt_offset=%pa, size=%pa\n",
		      &stolen_offset, &gtt_offset, &size);

	/* KISS and expect everything to be page-aligned */
	if (WARN_ON(size == 0) ||
	    WARN_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE)) ||
	    WARN_ON(!IS_ALIGNED(stolen_offset, I915_GTT_MIN_ALIGNMENT)))
		return NULL;

	stolen = kzalloc(sizeof(*stolen), GFP_KERNEL);
	if (!stolen)
		return NULL;

	stolen->start = stolen_offset;
	stolen->size = size;
	mutex_lock(&dev_priv->mm.stolen_lock);
	ret = drm_mm_reserve_node(&dev_priv->mm.stolen, stolen);
	mutex_unlock(&dev_priv->mm.stolen_lock);
	if (ret) {
		DRM_DEBUG_KMS("failed to allocate stolen space\n");
		kfree(stolen);
		return NULL;
	}

	obj = _i915_gem_object_create_stolen(dev_priv, stolen);
	if (obj == NULL) {
		DRM_DEBUG_KMS("failed to allocate stolen object\n");
		i915_gem_stolen_remove_node(dev_priv, stolen);
		kfree(stolen);
		return NULL;
	}

	/* Some objects just need physical mem from stolen space */
	if (gtt_offset == I915_GTT_OFFSET_NONE)
		return obj;

	ret = i915_gem_object_pin_pages(obj);
	if (ret)
		goto err;

	vma = i915_vma_instance(obj, &ggtt->base, NULL);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto err_pages;
	}

	/* To simplify the initialisation sequence between KMS and GTT,
	 * we allow construction of the stolen object prior to
	 * setting up the GTT space. The actual reservation will occur
	 * later.
	 */
	ret = i915_gem_gtt_reserve(&ggtt->base, &vma->node,
				   size, gtt_offset, obj->cache_level,
				   0);
	if (ret) {
		DRM_DEBUG_KMS("failed to allocate stolen GTT space\n");
		goto err_pages;
	}

	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));

	vma->pages = obj->mm.pages;
	vma->flags |= I915_VMA_GLOBAL_BIND;
	__i915_vma_set_map_and_fenceable(vma);

	list_move_tail(&vma->vm_link, &ggtt->base.inactive_list);

	spin_lock(&dev_priv->mm.obj_lock);
	list_move_tail(&obj->mm.link, &dev_priv->mm.bound_list);
	obj->bind_count++;
	spin_unlock(&dev_priv->mm.obj_lock);

	return obj;

err_pages:
	i915_gem_object_unpin_pages(obj);
err:
	i915_gem_object_put(obj);
	return NULL;
}

Contributors

Person                  Tokens   Token %   Commits   Commit %
Chris Wilson               268    59.03%        16     43.24%
Ben Widawsky                93    20.48%         6     16.22%
Paulo Zanoni                23     5.07%         2      5.41%
Daniel Vetter               19     4.19%         4     10.81%
Tvrtko A. Ursulin           15     3.30%         3      8.11%
Jesse Barnes                 9     1.98%         1      2.70%
Joonas Lahtinen              8     1.76%         2      5.41%
Matthew Auld                 7     1.54%         1      2.70%
Dan Carpenter                7     1.54%         1      2.70%
David Herrmann               5     1.10%         1      2.70%
Total                      454   100.00%        37    100.00%


Overall Contributors

Person                  Tokens   Token %   Commits   Commit %
Chris Wilson               865    32.49%        36     36.36%
Paulo Zanoni               703    26.41%         6      6.06%
Ville Syrjälä              422    15.85%         7      7.07%
Matthew Auld               291    10.93%         8      8.08%
Ben Widawsky                94     3.53%         7      7.07%
Daniel Vetter               94     3.53%        10     10.10%
Tvrtko A. Ursulin           48     1.80%         9      9.09%
David Herrmann              41     1.54%         2      2.02%
Joonas Lahtinen             37     1.39%         2      2.02%
Akash Goel                  24     0.90%         2      2.02%
Jesse Barnes                14     0.53%         2      2.02%
Imre Deak                    8     0.30%         2      2.02%
Dan Carpenter                7     0.26%         1      1.01%
Rodrigo Vivi                 6     0.23%         3      3.03%
Jani Nikula                  6     0.23%         1      1.01%
David Howells                2     0.08%         1      1.01%
Total                     2662   100.00%        99    100.00%
Created with cregit.