drm/ttm: replace busy placement with flags v6
Instead of a list of separate busy placements, add flags which indicate that
a placement should only be used when there is room or when we need to evict.

v2: add missing TTM_PL_FLAG_IDLE for i915
v3: fix auto build test ERROR on drm-tip/drm-tip
v4: fix some typos pointed out by checkpatch
v5: cleanup some rebase problems with VMWGFX
v6: implement some missing VMWGFX functionality pointed out by Zack,
    rename the flags as suggested by Michel, rebase on drm-tip and
    adjust XE as well

Signed-off-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Somalapuram Amaranath <Amaranath.Somalapuram@amd.com>
Reviewed-by: Zack Rusin <zack.rusin@broadcom.com>
Reviewed-by: Thomas Zimmermann <tzimmermann@suse.de>
Reviewed-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20240112125158.2748-4-christian.koenig@amd.com
commit a78a8da51b (parent 28e5126718)
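To make the intent concrete, here is a minimal sketch of how a driver fills its placement list under the new scheme. It is illustrative only and not part of the patch: the helper name fill_placements() is invented, while the TTM types and the TTM_PL_FLAG_DESIRED/TTM_PL_FLAG_FALLBACK flags are the ones introduced by this commit.

/* Illustrative sketch, not part of this commit: one placement list
 * replaces the old placement/busy_placement pair.  A place flagged
 * DESIRED is never used during eviction, a place flagged FALLBACK is
 * only used during eviction, and a place with no flag is used in
 * both passes.  fill_placements() is an invented helper name.
 */
#include <drm/ttm/ttm_placement.h>

static void fill_placements(struct ttm_placement *placement,
			    struct ttm_place *places)
{
	/* Preferred: VRAM, but only while there is room. */
	places[0].fpfn = 0;
	places[0].lpfn = 0;
	places[0].mem_type = TTM_PL_VRAM;
	places[0].flags = TTM_PL_FLAG_DESIRED;

	/* Fallback: GTT, only considered once eviction is needed. */
	places[1].fpfn = 0;
	places[1].lpfn = 0;
	places[1].mem_type = TTM_PL_TT;
	places[1].flags = TTM_PL_FLAG_FALLBACK;

	placement->num_placement = 2;
	placement->placement = places;
}

Previously the same intent needed two arrays: placement pointing only at the VRAM entry and busy_placement pointing only at the GTT entry.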
@@ -220,9 +220,6 @@ void amdgpu_bo_placement_from_domain(struct amdgpu_bo *abo, u32 domain)
 	placement->num_placement = c;
 	placement->placement = places;
-
-	placement->num_busy_placement = c;
-	placement->busy_placement = places;
 }
 
 /**
@@ -1406,8 +1403,7 @@ vm_fault_t amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
 					 AMDGPU_GEM_DOMAIN_GTT);
 
 	/* Avoid costly evictions; only set GTT as a busy placement */
-	abo->placement.num_busy_placement = 1;
-	abo->placement.busy_placement = &abo->placements[1];
+	abo->placements[0].flags |= TTM_PL_FLAG_DESIRED;
 
 	r = ttm_bo_validate(bo, &abo->placement, &ctx);
 	if (unlikely(r == -EBUSY || r == -ERESTARTSYS))
@@ -102,23 +102,19 @@ static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
 	/* Don't handle scatter gather BOs */
 	if (bo->type == ttm_bo_type_sg) {
 		placement->num_placement = 0;
-		placement->num_busy_placement = 0;
 		return;
 	}
 
 	/* Object isn't an AMDGPU object so ignore */
 	if (!amdgpu_bo_is_amdgpu_bo(bo)) {
 		placement->placement = &placements;
-		placement->busy_placement = &placements;
 		placement->num_placement = 1;
-		placement->num_busy_placement = 1;
 		return;
 	}
 
 	abo = ttm_to_amdgpu_bo(bo);
 	if (abo->flags & AMDGPU_GEM_CREATE_DISCARDABLE) {
 		placement->num_placement = 0;
-		placement->num_busy_placement = 0;
 		return;
 	}
 
@@ -128,13 +124,13 @@ static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
 	case AMDGPU_PL_OA:
 	case AMDGPU_PL_DOORBELL:
 		placement->num_placement = 0;
-		placement->num_busy_placement = 0;
 		return;
 
 	case TTM_PL_VRAM:
 		if (!adev->mman.buffer_funcs_enabled) {
 			/* Move to system memory */
 			amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_CPU);
 
 		} else if (!amdgpu_gmc_vram_full_visible(&adev->gmc) &&
 			   !(abo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) &&
 			   amdgpu_bo_in_cpu_visible_vram(abo)) {
@@ -149,8 +145,7 @@ static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
 							 AMDGPU_GEM_DOMAIN_CPU);
 			abo->placements[0].fpfn = adev->gmc.visible_vram_size >> PAGE_SHIFT;
 			abo->placements[0].lpfn = 0;
-			abo->placement.busy_placement = &abo->placements[1];
-			abo->placement.num_busy_placement = 1;
+			abo->placements[0].flags |= TTM_PL_FLAG_DESIRED;
 		} else {
 			/* Move to GTT memory */
 			amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_GTT |
@@ -967,8 +962,6 @@ int amdgpu_ttm_alloc_gart(struct ttm_buffer_object *bo)
 	/* allocate GART space */
 	placement.num_placement = 1;
 	placement.placement = &placements;
-	placement.num_busy_placement = 1;
-	placement.busy_placement = &placements;
 	placements.fpfn = 0;
 	placements.lpfn = adev->gmc.gart_size >> PAGE_SHIFT;
 	placements.mem_type = TTM_PL_TT;
@@ -147,7 +147,6 @@ static void drm_gem_vram_placement(struct drm_gem_vram_object *gbo,
 		invariant_flags = TTM_PL_FLAG_TOPDOWN;
 
 	gbo->placement.placement = gbo->placements;
-	gbo->placement.busy_placement = gbo->placements;
 
 	if (pl_flag & DRM_GEM_VRAM_PL_FLAG_VRAM) {
 		gbo->placements[c].mem_type = TTM_PL_VRAM;
@@ -160,7 +159,6 @@ static void drm_gem_vram_placement(struct drm_gem_vram_object *gbo,
 	}
 
 	gbo->placement.num_placement = c;
-	gbo->placement.num_busy_placement = c;
 
 	for (i = 0; i < c; ++i) {
 		gbo->placements[i].fpfn = 0;
@@ -65,8 +65,6 @@ static const struct ttm_place sys_placement_flags = {
 static struct ttm_placement i915_sys_placement = {
 	.num_placement = 1,
 	.placement = &sys_placement_flags,
-	.num_busy_placement = 1,
-	.busy_placement = &sys_placement_flags,
 };
 
 /**
@@ -157,32 +155,28 @@ i915_ttm_place_from_region(const struct intel_memory_region *mr,
 
 static void
 i915_ttm_placement_from_obj(const struct drm_i915_gem_object *obj,
-			    struct ttm_place *requested,
-			    struct ttm_place *busy,
+			    struct ttm_place *places,
 			    struct ttm_placement *placement)
 {
 	unsigned int num_allowed = obj->mm.n_placements;
 	unsigned int flags = obj->flags;
 	unsigned int i;
 
-	placement->num_placement = 1;
+	places[0].flags |= TTM_PL_FLAG_DESIRED;
 	i915_ttm_place_from_region(num_allowed ? obj->mm.placements[0] :
-				   obj->mm.region, requested, obj->bo_offset,
+				   obj->mm.region, &places[0], obj->bo_offset,
 				   obj->base.size, flags);
 
 	/* Cache this on object? */
-	placement->num_busy_placement = num_allowed;
-	for (i = 0; i < placement->num_busy_placement; ++i)
-		i915_ttm_place_from_region(obj->mm.placements[i], busy + i,
-					   obj->bo_offset, obj->base.size, flags);
-
-	if (num_allowed == 0) {
-		*busy = *requested;
-		placement->num_busy_placement = 1;
+	for (i = 0; i < num_allowed; ++i) {
+		i915_ttm_place_from_region(obj->mm.placements[i],
+					   &places[i + 1], obj->bo_offset,
+					   obj->base.size, flags);
+		places[i + 1].flags |= TTM_PL_FLAG_FALLBACK;
 	}
 
-	placement->placement = requested;
-	placement->busy_placement = busy;
+	placement->num_placement = num_allowed + 1;
+	placement->placement = places;
 }
 
 static int i915_ttm_tt_shmem_populate(struct ttm_device *bdev,
@@ -789,7 +783,8 @@ static int __i915_ttm_get_pages(struct drm_i915_gem_object *obj,
 	int ret;
 
 	/* First try only the requested placement. No eviction. */
-	real_num_busy = fetch_and_zero(&placement->num_busy_placement);
+	real_num_busy = placement->num_placement;
+	placement->num_placement = 1;
 	ret = ttm_bo_validate(bo, placement, &ctx);
 	if (ret) {
 		ret = i915_ttm_err_to_gem(ret);
@@ -805,7 +800,7 @@ static int __i915_ttm_get_pages(struct drm_i915_gem_object *obj,
 		 * If the initial attempt fails, allow all accepted placements,
 		 * evicting if necessary.
 		 */
-		placement->num_busy_placement = real_num_busy;
+		placement->num_placement = real_num_busy;
 		ret = ttm_bo_validate(bo, placement, &ctx);
 		if (ret)
 			return i915_ttm_err_to_gem(ret);
@@ -839,7 +834,7 @@ static int __i915_ttm_get_pages(struct drm_i915_gem_object *obj,
 
 static int i915_ttm_get_pages(struct drm_i915_gem_object *obj)
 {
-	struct ttm_place requested, busy[I915_TTM_MAX_PLACEMENTS];
+	struct ttm_place places[I915_TTM_MAX_PLACEMENTS + 1];
 	struct ttm_placement placement;
 
 	/* restricted by sg_alloc_table */
@@ -849,7 +844,7 @@ static int i915_ttm_get_pages(struct drm_i915_gem_object *obj)
 	GEM_BUG_ON(obj->mm.n_placements > I915_TTM_MAX_PLACEMENTS);
 
 	/* Move to the requested placement. */
-	i915_ttm_placement_from_obj(obj, &requested, busy, &placement);
+	i915_ttm_placement_from_obj(obj, places, &placement);
 
 	return __i915_ttm_get_pages(obj, &placement);
 }
@@ -879,9 +874,7 @@ static int __i915_ttm_migrate(struct drm_i915_gem_object *obj,
 	i915_ttm_place_from_region(mr, &requested, obj->bo_offset,
 				   obj->base.size, flags);
 	placement.num_placement = 1;
-	placement.num_busy_placement = 1;
 	placement.placement = &requested;
-	placement.busy_placement = &requested;
 
 	ret = __i915_ttm_get_pages(obj, &placement);
 	if (ret)
@@ -54,7 +54,6 @@ static void lsdc_bo_set_placement(struct lsdc_bo *lbo, u32 domain)
 		pflags |= TTM_PL_FLAG_TOPDOWN;
 
 	lbo->placement.placement = lbo->placements;
-	lbo->placement.busy_placement = lbo->placements;
 
 	if (domain & LSDC_GEM_DOMAIN_VRAM) {
 		lbo->placements[c].mem_type = TTM_PL_VRAM;
@@ -77,7 +76,6 @@ static void lsdc_bo_set_placement(struct lsdc_bo *lbo, u32 domain)
 	}
 
 	lbo->placement.num_placement = c;
-	lbo->placement.num_busy_placement = c;
 
 	for (i = 0; i < c; ++i) {
 		lbo->placements[i].fpfn = 0;
@@ -403,27 +403,6 @@ nouveau_bo_new(struct nouveau_cli *cli, u64 size, int align,
 	return 0;
 }
 
-static void
-set_placement_list(struct ttm_place *pl, unsigned *n, uint32_t domain)
-{
-	*n = 0;
-
-	if (domain & NOUVEAU_GEM_DOMAIN_VRAM) {
-		pl[*n].mem_type = TTM_PL_VRAM;
-		pl[*n].flags = 0;
-		(*n)++;
-	}
-	if (domain & NOUVEAU_GEM_DOMAIN_GART) {
-		pl[*n].mem_type = TTM_PL_TT;
-		pl[*n].flags = 0;
-		(*n)++;
-	}
-	if (domain & NOUVEAU_GEM_DOMAIN_CPU) {
-		pl[*n].mem_type = TTM_PL_SYSTEM;
-		pl[(*n)++].flags = 0;
-	}
-}
-
 static void
 set_placement_range(struct nouveau_bo *nvbo, uint32_t domain)
 {
@@ -451,10 +430,6 @@ set_placement_range(struct nouveau_bo *nvbo, uint32_t domain)
 			nvbo->placements[i].fpfn = fpfn;
 			nvbo->placements[i].lpfn = lpfn;
 		}
-		for (i = 0; i < nvbo->placement.num_busy_placement; ++i) {
-			nvbo->busy_placements[i].fpfn = fpfn;
-			nvbo->busy_placements[i].lpfn = lpfn;
-		}
 	}
 }
 
@@ -462,15 +437,32 @@ void
 nouveau_bo_placement_set(struct nouveau_bo *nvbo, uint32_t domain,
 			 uint32_t busy)
 {
-	struct ttm_placement *pl = &nvbo->placement;
+	unsigned int *n = &nvbo->placement.num_placement;
+	struct ttm_place *pl = nvbo->placements;
 
-	pl->placement = nvbo->placements;
-	set_placement_list(nvbo->placements, &pl->num_placement, domain);
+	domain |= busy;
 
-	pl->busy_placement = nvbo->busy_placements;
-	set_placement_list(nvbo->busy_placements, &pl->num_busy_placement,
-			   domain | busy);
+	*n = 0;
+	if (domain & NOUVEAU_GEM_DOMAIN_VRAM) {
+		pl[*n].mem_type = TTM_PL_VRAM;
+		pl[*n].flags = busy & NOUVEAU_GEM_DOMAIN_VRAM ?
+			TTM_PL_FLAG_FALLBACK : 0;
+		(*n)++;
+	}
+	if (domain & NOUVEAU_GEM_DOMAIN_GART) {
+		pl[*n].mem_type = TTM_PL_TT;
+		pl[*n].flags = busy & NOUVEAU_GEM_DOMAIN_GART ?
+			TTM_PL_FLAG_FALLBACK : 0;
+		(*n)++;
+	}
+	if (domain & NOUVEAU_GEM_DOMAIN_CPU) {
+		pl[*n].mem_type = TTM_PL_SYSTEM;
+		pl[*n].flags = busy & NOUVEAU_GEM_DOMAIN_CPU ?
+			TTM_PL_FLAG_FALLBACK : 0;
+		(*n)++;
+	}
 
+	nvbo->placement.placement = nvbo->placements;
 	set_placement_range(nvbo, domain);
 }
 
@@ -1313,11 +1305,6 @@ vm_fault_t nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
 			nvbo->placements[i].lpfn = mappable;
 		}
 
-		for (i = 0; i < nvbo->placement.num_busy_placement; ++i) {
-			nvbo->busy_placements[i].fpfn = 0;
-			nvbo->busy_placements[i].lpfn = mappable;
-		}
-
 		nouveau_bo_placement_set(nvbo, NOUVEAU_GEM_DOMAIN_VRAM, 0);
 	}
 
@@ -15,7 +15,6 @@ struct nouveau_bo {
 	struct ttm_placement placement;
 	u32 valid_domains;
 	struct ttm_place placements[3];
-	struct ttm_place busy_placements[3];
 	bool force_coherent;
 	struct ttm_bo_kmap_obj kmap;
 	struct list_head head;
@@ -66,7 +66,6 @@ void qxl_ttm_placement_from_domain(struct qxl_bo *qbo, u32 domain)
 		pflag |= TTM_PL_FLAG_TOPDOWN;
 
 	qbo->placement.placement = qbo->placements;
-	qbo->placement.busy_placement = qbo->placements;
 	if (domain == QXL_GEM_DOMAIN_VRAM) {
 		qbo->placements[c].mem_type = TTM_PL_VRAM;
 		qbo->placements[c++].flags = pflag;
@@ -86,7 +85,6 @@ void qxl_ttm_placement_from_domain(struct qxl_bo *qbo, u32 domain)
 		qbo->placements[c++].flags = 0;
 	}
 	qbo->placement.num_placement = c;
-	qbo->placement.num_busy_placement = c;
 	for (i = 0; i < c; ++i) {
 		qbo->placements[i].fpfn = 0;
 		qbo->placements[i].lpfn = 0;
@@ -60,9 +60,7 @@ static void qxl_evict_flags(struct ttm_buffer_object *bo,
 
 	if (!qxl_ttm_bo_is_qxl_bo(bo)) {
 		placement->placement = &placements;
-		placement->busy_placement = &placements;
 		placement->num_placement = 1;
-		placement->num_busy_placement = 1;
 		return;
 	}
 	qbo = to_qxl_bo(bo);
@@ -78,7 +78,6 @@ void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain)
 	u32 c = 0, i;
 
 	rbo->placement.placement = rbo->placements;
-	rbo->placement.busy_placement = rbo->placements;
 	if (domain & RADEON_GEM_DOMAIN_VRAM) {
 		/* Try placing BOs which don't need CPU access outside of the
 		 * CPU accessible part of VRAM
@@ -114,7 +113,6 @@ void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain)
 	}
 
 	rbo->placement.num_placement = c;
-	rbo->placement.num_busy_placement = c;
 
 	for (i = 0; i < c; ++i) {
 		if ((rbo->flags & RADEON_GEM_CPU_ACCESS) &&
@@ -92,9 +92,7 @@ static void radeon_evict_flags(struct ttm_buffer_object *bo,
 
 	if (!radeon_ttm_bo_is_radeon_bo(bo)) {
 		placement->placement = &placements;
-		placement->busy_placement = &placements;
 		placement->num_placement = 1;
-		placement->num_busy_placement = 1;
 		return;
 	}
 	rbo = container_of(bo, struct radeon_bo, tbo);
@@ -114,15 +112,11 @@ static void radeon_evict_flags(struct ttm_buffer_object *bo,
 			 */
 			radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_VRAM |
							 RADEON_GEM_DOMAIN_GTT);
-			rbo->placement.num_busy_placement = 0;
 			for (i = 0; i < rbo->placement.num_placement; i++) {
 				if (rbo->placements[i].mem_type == TTM_PL_VRAM) {
 					if (rbo->placements[i].fpfn < fpfn)
 						rbo->placements[i].fpfn = fpfn;
 				} else {
-					rbo->placement.busy_placement =
-						&rbo->placements[i];
-					rbo->placement.num_busy_placement = 1;
+					rbo->placements[0].flags |= TTM_PL_FLAG_DESIRED;
 				}
 			}
 		} else
@@ -324,7 +324,6 @@ void radeon_uvd_force_into_uvd_segment(struct radeon_bo *rbo,
 	rbo->placements[1].fpfn += (256 * 1024 * 1024) >> PAGE_SHIFT;
 	rbo->placements[1].lpfn += (256 * 1024 * 1024) >> PAGE_SHIFT;
 	rbo->placement.num_placement++;
-	rbo->placement.num_busy_placement++;
 }
 
 void radeon_uvd_free_handles(struct radeon_device *rdev, struct drm_file *filp)
@@ -410,8 +410,8 @@ static int ttm_bo_bounce_temp_buffer(struct ttm_buffer_object *bo,
 	struct ttm_resource *hop_mem;
 	int ret;
 
-	hop_placement.num_placement = hop_placement.num_busy_placement = 1;
-	hop_placement.placement = hop_placement.busy_placement = hop;
+	hop_placement.num_placement = 1;
+	hop_placement.placement = hop;
 
 	/* find space in the bounce domain */
 	ret = ttm_bo_mem_space(bo, &hop_placement, &hop_mem, ctx);
@@ -440,10 +440,9 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo,
 	dma_resv_assert_held(bo->base.resv);
 
 	placement.num_placement = 0;
-	placement.num_busy_placement = 0;
 	bdev->funcs->evict_flags(bo, &placement);
 
-	if (!placement.num_placement && !placement.num_busy_placement) {
+	if (!placement.num_placement) {
 		ret = ttm_bo_wait_ctx(bo, ctx);
 		if (ret)
 			return ret;
@@ -791,6 +790,9 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
 		const struct ttm_place *place = &placement->placement[i];
 		struct ttm_resource_manager *man;
 
+		if (place->flags & TTM_PL_FLAG_FALLBACK)
+			continue;
+
 		man = ttm_manager_type(bdev, place->mem_type);
 		if (!man || !ttm_resource_manager_used(man))
 			continue;
@@ -813,10 +815,13 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
 		return 0;
 	}
 
-	for (i = 0; i < placement->num_busy_placement; ++i) {
-		const struct ttm_place *place = &placement->busy_placement[i];
+	for (i = 0; i < placement->num_placement; ++i) {
+		const struct ttm_place *place = &placement->placement[i];
 		struct ttm_resource_manager *man;
 
+		if (place->flags & TTM_PL_FLAG_DESIRED)
+			continue;
+
 		man = ttm_manager_type(bdev, place->mem_type);
 		if (!man || !ttm_resource_manager_used(man))
 			continue;
@@ -904,11 +909,11 @@ int ttm_bo_validate(struct ttm_buffer_object *bo,
 	/*
 	 * Remove the backing store if no placement is given.
 	 */
-	if (!placement->num_placement && !placement->num_busy_placement)
+	if (!placement->num_placement)
 		return ttm_bo_pipeline_gutting(bo);
 
 	/* Check whether we need to move buffer. */
-	if (bo->resource && ttm_resource_compat(bo->resource, placement))
+	if (bo->resource && ttm_resource_compatible(bo->resource, placement))
 		return 0;
 
 	/* Moving of pinned BOs is forbidden */
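The two ttm_bo_mem_space() hunks above carry the core of the new scheme: the first pass skips TTM_PL_FLAG_FALLBACK places and does not evict, the second pass skips TTM_PL_FLAG_DESIRED places and may evict. A condensed, illustrative sketch of that control flow follows; it is not the actual function, and the resource-manager lookup, allocation attempts and error handling are omitted.

/* Illustrative sketch of the two-pass selection in ttm_bo_mem_space()
 * after this patch; the real function also looks up the resource
 * manager, tries the allocation and handles busy/error cases.
 */
#include <linux/errno.h>
#include <drm/ttm/ttm_placement.h>

static int two_pass_selection_sketch(const struct ttm_placement *placement)
{
	unsigned int i;

	/* Pass 1: no eviction, fallback-only placements are skipped. */
	for (i = 0; i < placement->num_placement; ++i) {
		const struct ttm_place *place = &placement->placement[i];

		if (place->flags & TTM_PL_FLAG_FALLBACK)
			continue;
		/* try to allocate space in place->mem_type without evicting */
	}

	/* Pass 2: eviction allowed, desired-only placements are skipped. */
	for (i = 0; i < placement->num_placement; ++i) {
		const struct ttm_place *place = &placement->placement[i];

		if (place->flags & TTM_PL_FLAG_DESIRED)
			continue;
		/* try again, evicting other buffers from place->mem_type */
	}

	/* both passes exhausted */
	return -ENOMEM;
}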
@@ -291,37 +291,15 @@ bool ttm_resource_intersects(struct ttm_device *bdev,
 }
 
 /**
- * ttm_resource_compatible - test for compatibility
+ * ttm_resource_compatible - check if resource is compatible with placement
  *
- * @bdev: TTM device structure
- * @res: The resource to test
- * @place: The placement to test
- * @size: How many bytes the new allocation needs.
+ * @res: the resource to check
+ * @placement: the placement to check against
  *
- * Test if @res compatible with @place and @size.
- *
- * Returns true if the res placement compatible with @place and @size.
+ * Returns true if the placement is compatible.
  */
-bool ttm_resource_compatible(struct ttm_device *bdev,
-			     struct ttm_resource *res,
-			     const struct ttm_place *place,
-			     size_t size)
-{
-	struct ttm_resource_manager *man;
-
-	if (!res || !place)
-		return false;
-
-	man = ttm_manager_type(bdev, res->mem_type);
-	if (!man->func->compatible)
-		return true;
-
-	return man->func->compatible(man, res, place, size);
-}
-
-static bool ttm_resource_places_compat(struct ttm_resource *res,
-				       const struct ttm_place *places,
-				       unsigned num_placement)
+bool ttm_resource_compatible(struct ttm_resource *res,
+			     struct ttm_placement *placement)
 {
 	struct ttm_buffer_object *bo = res->bo;
 	struct ttm_device *bdev = bo->bdev;
@@ -330,44 +308,25 @@ static bool ttm_resource_places_compat(struct ttm_resource *res,
 	if (res->placement & TTM_PL_FLAG_TEMPORARY)
 		return false;
 
-	for (i = 0; i < num_placement; i++) {
-		const struct ttm_place *heap = &places[i];
+	for (i = 0; i < placement->num_placement; i++) {
+		const struct ttm_place *place = &placement->placement[i];
+		struct ttm_resource_manager *man;
 
-		if (!ttm_resource_compatible(bdev, res, heap, bo->base.size))
+		if (res->mem_type != place->mem_type)
 			continue;
 
-		if ((res->mem_type == heap->mem_type) &&
-		    (!(heap->flags & TTM_PL_FLAG_CONTIGUOUS) ||
+		man = ttm_manager_type(bdev, res->mem_type);
+		if (man->func->compatible &&
+		    !man->func->compatible(man, res, place, bo->base.size))
+			continue;
+
+		if ((!(place->flags & TTM_PL_FLAG_CONTIGUOUS) ||
 		     (res->placement & TTM_PL_FLAG_CONTIGUOUS)))
 			return true;
 	}
 	return false;
 }
 
-/**
- * ttm_resource_compat - check if resource is compatible with placement
- *
- * @res: the resource to check
- * @placement: the placement to check against
- *
- * Returns true if the placement is compatible.
- */
-bool ttm_resource_compat(struct ttm_resource *res,
-			 struct ttm_placement *placement)
-{
-	if (ttm_resource_places_compat(res, placement->placement,
-				       placement->num_placement))
-		return true;
-
-	if ((placement->busy_placement != placement->placement ||
-	     placement->num_busy_placement > placement->num_placement) &&
-	    ttm_resource_places_compat(res, placement->busy_placement,
-				       placement->num_busy_placement))
-		return true;
-
-	return false;
-}
-
 void ttm_resource_set_bo(struct ttm_resource *res,
 			 struct ttm_buffer_object *bo)
 {
@@ -742,9 +742,21 @@ void vmw_bo_move_notify(struct ttm_buffer_object *bo,
 	vmw_resource_unbind_list(vbo);
 }
 
-static u32
-set_placement_list(struct ttm_place *pl, u32 domain)
+static u32 placement_flags(u32 domain, u32 desired, u32 fallback)
 {
+	if (desired & fallback & domain)
+		return 0;
+
+	if (desired & domain)
+		return TTM_PL_FLAG_DESIRED;
+
+	return TTM_PL_FLAG_FALLBACK;
+}
+
+static u32
+set_placement_list(struct ttm_place *pl, u32 desired, u32 fallback)
+{
+	u32 domain = desired | fallback;
 	u32 n = 0;
 
 	/*
@@ -752,35 +764,40 @@ set_placement_list(struct ttm_place *pl, u32 domain)
 	 */
 	if (domain & VMW_BO_DOMAIN_MOB) {
 		pl[n].mem_type = VMW_PL_MOB;
-		pl[n].flags = 0;
+		pl[n].flags = placement_flags(VMW_BO_DOMAIN_MOB, desired,
+					      fallback);
 		pl[n].fpfn = 0;
 		pl[n].lpfn = 0;
 		n++;
 	}
 	if (domain & VMW_BO_DOMAIN_GMR) {
 		pl[n].mem_type = VMW_PL_GMR;
-		pl[n].flags = 0;
+		pl[n].flags = placement_flags(VMW_BO_DOMAIN_GMR, desired,
+					      fallback);
 		pl[n].fpfn = 0;
 		pl[n].lpfn = 0;
 		n++;
 	}
 	if (domain & VMW_BO_DOMAIN_VRAM) {
 		pl[n].mem_type = TTM_PL_VRAM;
-		pl[n].flags = 0;
+		pl[n].flags = placement_flags(VMW_BO_DOMAIN_VRAM, desired,
+					      fallback);
 		pl[n].fpfn = 0;
 		pl[n].lpfn = 0;
 		n++;
 	}
 	if (domain & VMW_BO_DOMAIN_WAITABLE_SYS) {
 		pl[n].mem_type = VMW_PL_SYSTEM;
-		pl[n].flags = 0;
+		pl[n].flags = placement_flags(VMW_BO_DOMAIN_WAITABLE_SYS,
+					      desired, fallback);
 		pl[n].fpfn = 0;
 		pl[n].lpfn = 0;
 		n++;
 	}
 	if (domain & VMW_BO_DOMAIN_SYS) {
 		pl[n].mem_type = TTM_PL_SYSTEM;
-		pl[n].flags = 0;
+		pl[n].flags = placement_flags(VMW_BO_DOMAIN_SYS, desired,
+					      fallback);
 		pl[n].fpfn = 0;
 		pl[n].lpfn = 0;
 		n++;
@@ -806,7 +823,7 @@ void vmw_bo_placement_set(struct vmw_bo *bo, u32 domain, u32 busy_domain)
 	u32 i;
 
 	pl->placement = bo->places;
-	pl->num_placement = set_placement_list(bo->places, domain);
+	pl->num_placement = set_placement_list(bo->places, domain, busy_domain);
 
 	if (drm_debug_enabled(DRM_UT_DRIVER) && bo->tbo.resource) {
 		for (i = 0; i < pl->num_placement; ++i) {
@@ -821,8 +838,6 @@ void vmw_bo_placement_set(struct vmw_bo *bo, u32 domain, u32 busy_domain)
 			  __func__, bo->tbo.resource->mem_type, domain);
 	}
 
-	pl->busy_placement = bo->busy_places;
-	pl->num_busy_placement = set_placement_list(bo->busy_places, busy_domain);
 }
 
 void vmw_bo_placement_set_default_accelerated(struct vmw_bo *bo)
@@ -46,15 +46,11 @@ static const struct ttm_place sys_placement_flags = {
 struct ttm_placement vmw_vram_placement = {
 	.num_placement = 1,
 	.placement = &vram_placement_flags,
-	.num_busy_placement = 1,
-	.busy_placement = &vram_placement_flags
 };
 
 struct ttm_placement vmw_sys_placement = {
 	.num_placement = 1,
 	.placement = &sys_placement_flags,
-	.num_busy_placement = 1,
-	.busy_placement = &sys_placement_flags
 };
 
 const size_t vmw_tt_size = sizeof(struct vmw_ttm_tt);
@@ -64,6 +64,12 @@
 /* For multihop handling */
 #define TTM_PL_FLAG_TEMPORARY	(1 << 2)
 
+/* Placement is never used during eviction */
+#define TTM_PL_FLAG_DESIRED	(1 << 3)
+
+/* Placement is only used during eviction */
+#define TTM_PL_FLAG_FALLBACK	(1 << 4)
+
 /**
  * struct ttm_place
  *
@@ -86,16 +92,12 @@ struct ttm_place {
  *
  * @num_placement: number of preferred placements
  * @placement: preferred placements
- * @num_busy_placement: number of preferred placements when need to evict buffer
- * @busy_placement: preferred placements when need to evict buffer
  *
  * Structure indicating the placement you request for an object.
 */
 struct ttm_placement {
 	unsigned num_placement;
 	const struct ttm_place *placement;
-	unsigned num_busy_placement;
-	const struct ttm_place *busy_placement;
 };
 
 #endif
@@ -365,12 +365,8 @@ bool ttm_resource_intersects(struct ttm_device *bdev,
 			     struct ttm_resource *res,
 			     const struct ttm_place *place,
 			     size_t size);
-bool ttm_resource_compatible(struct ttm_device *bdev,
-			     struct ttm_resource *res,
-			     const struct ttm_place *place,
-			     size_t size);
-bool ttm_resource_compat(struct ttm_resource *res,
-			 struct ttm_placement *placement);
+bool ttm_resource_compatible(struct ttm_resource *res,
+			     struct ttm_placement *placement);
 void ttm_resource_set_bo(struct ttm_resource *res,
 			 struct ttm_buffer_object *bo);
 