drm/ttm: introduce utility function to free an allocated memory node
Existing core code/drivers call drm_mm_put_block on ttm_mem_reg.mm_node directly. Future patches will modify TTM behaviour in such a way that ttm_mem_reg.mm_node doesn't necessarily belong to drm_mm.

Reviewed-by: Jerome Glisse <jglisse@redhat.com>
Acked-by: Thomas Hellström <thellstrom@vmware.com>
Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
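The new helper simply centralizes the locked drm_mm_put_block() call that each caller currently open-codes. As a rough before/after sketch of one driver error path (identifiers taken from the hunks below; the surrounding code is illustrative only, not part of the patch):

	/* Before: each caller takes the LRU lock and frees the node itself. */
	if (tmp_mem.mm_node) {
		spin_lock(&bo->bdev->glob->lru_lock);
		drm_mm_put_block(tmp_mem.mm_node);
		spin_unlock(&bo->bdev->glob->lru_lock);
	}

	/* After: the caller hands the ttm_mem_reg back to TTM; if future
	 * patches change what mm_node points at, only ttm_bo_mem_put()
	 * needs to know how to free it. */
	ttm_bo_mem_put(bo, &tmp_mem);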
@@ -693,12 +693,7 @@ nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
 
 	ret = ttm_bo_move_ttm(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);
 out:
-	if (tmp_mem.mm_node) {
-		spin_lock(&bo->bdev->glob->lru_lock);
-		drm_mm_put_block(tmp_mem.mm_node);
-		spin_unlock(&bo->bdev->glob->lru_lock);
-	}
-
+	ttm_bo_mem_put(bo, &tmp_mem);
 	return ret;
 }
 
@@ -731,12 +726,7 @@ nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr,
 		goto out;
 
 out:
-	if (tmp_mem.mm_node) {
-		spin_lock(&bo->bdev->glob->lru_lock);
-		drm_mm_put_block(tmp_mem.mm_node);
-		spin_unlock(&bo->bdev->glob->lru_lock);
-	}
-
+	ttm_bo_mem_put(bo, &tmp_mem);
 	return ret;
 }
 
@@ -326,14 +326,7 @@ static int radeon_move_vram_ram(struct ttm_buffer_object *bo,
 	}
 	r = ttm_bo_move_ttm(bo, true, no_wait_reserve, no_wait_gpu, new_mem);
 out_cleanup:
-	if (tmp_mem.mm_node) {
-		struct ttm_bo_global *glob = rdev->mman.bdev.glob;
-
-		spin_lock(&glob->lru_lock);
-		drm_mm_put_block(tmp_mem.mm_node);
-		spin_unlock(&glob->lru_lock);
-		return r;
-	}
+	ttm_bo_mem_put(bo, &tmp_mem);
 	return r;
 }
 
@@ -372,14 +365,7 @@ static int radeon_move_ram_vram(struct ttm_buffer_object *bo,
 		goto out_cleanup;
 	}
 out_cleanup:
-	if (tmp_mem.mm_node) {
-		struct ttm_bo_global *glob = rdev->mman.bdev.glob;
-
-		spin_lock(&glob->lru_lock);
-		drm_mm_put_block(tmp_mem.mm_node);
-		spin_unlock(&glob->lru_lock);
-		return r;
-	}
+	ttm_bo_mem_put(bo, &tmp_mem);
 	return r;
 }
 
@@ -475,11 +475,8 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo, bool remove_all)
 		list_del_init(&bo->ddestroy);
 		++put_count;
 	}
-	if (bo->mem.mm_node) {
-		drm_mm_put_block(bo->mem.mm_node);
-		bo->mem.mm_node = NULL;
-	}
 	spin_unlock(&glob->lru_lock);
+	ttm_bo_mem_put(bo, &bo->mem);
 
 	atomic_set(&bo->reserved, 0);
 
@@ -621,7 +618,6 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
 			bool no_wait_reserve, bool no_wait_gpu)
 {
 	struct ttm_bo_device *bdev = bo->bdev;
-	struct ttm_bo_global *glob = bo->glob;
 	struct ttm_mem_reg evict_mem;
 	struct ttm_placement placement;
 	int ret = 0;
@@ -667,12 +663,7 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
 	if (ret) {
 		if (ret != -ERESTARTSYS)
 			printk(KERN_ERR TTM_PFX "Buffer eviction failed\n");
-		spin_lock(&glob->lru_lock);
-		if (evict_mem.mm_node) {
-			drm_mm_put_block(evict_mem.mm_node);
-			evict_mem.mm_node = NULL;
-		}
-		spin_unlock(&glob->lru_lock);
+		ttm_bo_mem_put(bo, &evict_mem);
 		goto out;
 	}
 	bo->evicted = true;
@@ -769,6 +760,19 @@ static int ttm_bo_man_get_node(struct ttm_buffer_object *bo,
 	return 0;
 }
 
+void ttm_bo_mem_put(struct ttm_buffer_object *bo, struct ttm_mem_reg *mem)
+{
+	struct ttm_bo_global *glob = bo->glob;
+
+	if (mem->mm_node) {
+		spin_lock(&glob->lru_lock);
+		drm_mm_put_block(mem->mm_node);
+		spin_unlock(&glob->lru_lock);
+		mem->mm_node = NULL;
+	}
+}
+EXPORT_SYMBOL(ttm_bo_mem_put);
+
 /**
  * Repeatedly evict memory from the LRU for @mem_type until we create enough
  * space, or we've evicted everything and there isn't enough space.
@@ -39,14 +39,7 @@
 
 void ttm_bo_free_old_node(struct ttm_buffer_object *bo)
 {
-	struct ttm_mem_reg *old_mem = &bo->mem;
-
-	if (old_mem->mm_node) {
-		spin_lock(&bo->glob->lru_lock);
-		drm_mm_put_block(old_mem->mm_node);
-		spin_unlock(&bo->glob->lru_lock);
-	}
-	old_mem->mm_node = NULL;
+	ttm_bo_mem_put(bo, &bo->mem);
 }
 
 int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
@@ -649,6 +649,10 @@ extern int ttm_bo_mem_space(struct ttm_buffer_object *bo,
 				struct ttm_mem_reg *mem,
 				bool interruptible,
 				bool no_wait_reserve, bool no_wait_gpu);
+
+extern void ttm_bo_mem_put(struct ttm_buffer_object *bo,
+			   struct ttm_mem_reg *mem);
+
 /**
  * ttm_bo_wait_for_cpu
  *