Revert "ttm: Include the 'struct dev' when using the DMA API."
This reverts commit 5a893fc28f.
This causes a use-after-free in the ttm free alloc pages path, which
tries to fetch the backend ('be') after the be has been destroyed.

Signed-off-by: Dave Airlie <airlied@redhat.com>
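
For context on the failure mode: in the TTM teardown path the backend is
destroyed before the allocated pages are handed back to the pool, so any
attempt to recover the struct device through the backend at free time walks
already-destroyed state. A paraphrased sketch of that ordering (names follow
the TTM code of this era, but this is not the verbatim kernel source):

    /* Sketch: ttm_tt destruction order behind the use-after-free. */
    void ttm_tt_destroy(struct ttm_tt *ttm)
    {
            struct ttm_backend *be = ttm->be;

            if (be != NULL) {
                    be->func->destroy(be);  /* backend torn down here */
                    ttm->be = NULL;
            }

            if (ttm->pages != NULL)
                    ttm_tt_free_alloced_pages(ttm); /* the reverted code then
                                                     * read ttm->be->bdev->dev
                                                     * inside this call */
    }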

diff --git a/drivers/gpu/drm/nouveau/nouveau_mem.c b/drivers/gpu/drm/nouveau/nouveau_mem.c
@@ -409,7 +409,6 @@ nouveau_mem_vram_init(struct drm_device *dev)
 	if (ret)
 		return ret;
 
-	dev_priv->ttm.bdev.dev = dev->dev;
 	ret = ttm_bo_device_init(&dev_priv->ttm.bdev,
 				 dev_priv->ttm.bo_global_ref.ref.object,
 				 &nouveau_bo_driver, DRM_FILE_PAGE_OFFSET,

diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -513,7 +513,6 @@ int radeon_ttm_init(struct radeon_device *rdev)
 	if (r) {
 		return r;
 	}
-	rdev->mman.bdev.dev = rdev->dev;
 	/* No others user of address space so set it to 0 */
 	r = ttm_bo_device_init(&rdev->mman.bdev,
 			       rdev->mman.bo_global_ref.ref.object,

diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
@@ -664,7 +664,7 @@ out:
  */
 int ttm_get_pages(struct list_head *pages, int flags,
 		  enum ttm_caching_state cstate, unsigned count,
-		  dma_addr_t *dma_address, struct device *dev)
+		  dma_addr_t *dma_address)
 {
 	struct ttm_page_pool *pool = ttm_get_pool(flags, cstate);
 	struct page *p = NULL;
@@ -685,7 +685,7 @@ int ttm_get_pages(struct list_head *pages, int flags,
 		for (r = 0; r < count; ++r) {
 			if ((flags & TTM_PAGE_FLAG_DMA32) && dma_address) {
 				void *addr;
-				addr = dma_alloc_coherent(dev, PAGE_SIZE,
+				addr = dma_alloc_coherent(NULL, PAGE_SIZE,
 							  &dma_address[r],
 							  gfp_flags);
 				if (addr == NULL)
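
The hunk above returns to allocating coherent DMA memory without a specific
struct device. As a reference for the API shape only (a minimal sketch, not
TTM code; GFP_DMA32 stands in for whatever gfp_flags the pool computes), on
kernels of this era a NULL device fell back to the platform default mapping:

    #include <linux/dma-mapping.h>

    static int example_coherent_alloc(void)
    {
            dma_addr_t bus_addr;
            void *cpu_addr;

            /* NULL dev: no per-device dma_mask/IOMMU accounting. */
            cpu_addr = dma_alloc_coherent(NULL, PAGE_SIZE, &bus_addr,
                                          GFP_DMA32);
            if (cpu_addr == NULL)
                    return -ENOMEM;
            dma_free_coherent(NULL, PAGE_SIZE, cpu_addr, bus_addr);
            return 0;
    }
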
@@ -730,7 +730,7 @@ int ttm_get_pages(struct list_head *pages, int flags,
 			printk(KERN_ERR TTM_PFX
 			       "Failed to allocate extra pages "
 			       "for large request.");
-			ttm_put_pages(pages, 0, flags, cstate, NULL, NULL);
+			ttm_put_pages(pages, 0, flags, cstate, NULL);
 			return r;
 		}
 	}
@@ -741,8 +741,7 @@ int ttm_get_pages(struct list_head *pages, int flags,
 
 /* Put all pages in pages list to correct pool to wait for reuse */
 void ttm_put_pages(struct list_head *pages, unsigned page_count, int flags,
-		   enum ttm_caching_state cstate, dma_addr_t *dma_address,
-		   struct device *dev)
+		   enum ttm_caching_state cstate, dma_addr_t *dma_address)
 {
 	unsigned long irq_flags;
 	struct ttm_page_pool *pool = ttm_get_pool(flags, cstate);
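
With the two hunks above applied, the page-pool entry points are back to
their pre-5a893fc28f prototypes (matching the ttm_page_alloc.h hunks at the
end of this diff):

    int ttm_get_pages(struct list_head *pages, int flags,
                      enum ttm_caching_state cstate, unsigned count,
                      dma_addr_t *dma_address);
    void ttm_put_pages(struct list_head *pages, unsigned page_count,
                       int flags, enum ttm_caching_state cstate,
                       dma_addr_t *dma_address);
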
@@ -758,7 +757,7 @@ void ttm_put_pages(struct list_head *pages, unsigned page_count, int flags,
 			void *addr = page_address(p);
 			WARN_ON(!addr || !dma_address[r]);
 			if (addr)
-				dma_free_coherent(dev, PAGE_SIZE,
+				dma_free_coherent(NULL, PAGE_SIZE,
 						  addr,
 						  dma_address[r]);
 			dma_address[r] = 0;

diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c
@@ -110,7 +110,7 @@ static struct page *__ttm_tt_get_page(struct ttm_tt *ttm, int index)
 		INIT_LIST_HEAD(&h);
 
 		ret = ttm_get_pages(&h, ttm->page_flags, ttm->caching_state, 1,
-				    &ttm->dma_address[index], ttm->be->bdev->dev);
+				    &ttm->dma_address[index]);
 
 		if (ret != 0)
 			return NULL;
@@ -304,7 +304,7 @@ static void ttm_tt_free_alloced_pages(struct ttm_tt *ttm)
 		}
 	}
 	ttm_put_pages(&h, count, ttm->page_flags, ttm->caching_state,
-		      ttm->dma_address, ttm->be->bdev->dev);
+		      ttm->dma_address);
 	ttm->state = tt_unpopulated;
 	ttm->first_himem_page = ttm->num_pages;
 	ttm->last_lomem_page = -1;
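
The two ttm_tt.c call sites above are where the use-after-free actually
manifested: the ttm->be->bdev->dev argument deleted here is evaluated in the
free path, after the backend has been destroyed (see the teardown sketch near
the top of this page), e.g.:

    /* Offending dereference chain removed by this revert: in the
     * destroy path ttm->be is already gone when pages are freed. */
    struct device *dev = ttm->be->bdev->dev;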

diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -322,7 +322,7 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
 	ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM);
 	dev_priv->active_master = &dev_priv->fbdev_master;
 
-	dev_priv->bdev.dev = dev->dev;
+
 	ret = ttm_bo_device_init(&dev_priv->bdev,
 				 dev_priv->bo_global_ref.ref.object,
 				 &vmw_bo_driver, VMWGFX_FILE_PAGE_OFFSET,

diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h
@@ -551,7 +551,6 @@ struct ttm_bo_device {
 	struct list_head device_list;
 	struct ttm_bo_global *glob;
 	struct ttm_bo_driver *driver;
-	struct device *dev;
 	rwlock_t vm_lock;
 	struct ttm_mem_type_manager man[TTM_NUM_MEM_TYPES];
 	spinlock_t fence_lock;

diff --git a/include/drm/ttm/ttm_page_alloc.h b/include/drm/ttm/ttm_page_alloc.h
@@ -37,14 +37,12 @@
  * @cstate: ttm caching state for the page.
  * @count: number of pages to allocate.
  * @dma_address: The DMA (bus) address of pages (if TTM_PAGE_FLAG_DMA32 set).
- * @dev: struct device for appropiate DMA accounting.
  */
 int ttm_get_pages(struct list_head *pages,
 		  int flags,
 		  enum ttm_caching_state cstate,
 		  unsigned count,
-		  dma_addr_t *dma_address,
-		  struct device *dev);
+		  dma_addr_t *dma_address);
 /**
  * Put linked list of pages to pool.
  *
@@ -54,14 +52,12 @@ int ttm_get_pages(struct list_head *pages,
  * @flags: ttm flags for page allocation.
  * @cstate: ttm caching state.
  * @dma_address: The DMA (bus) address of pages (if TTM_PAGE_FLAG_DMA32 set).
- * @dev: struct device for appropiate DMA accounting.
  */
 void ttm_put_pages(struct list_head *pages,
 		   unsigned page_count,
 		   int flags,
 		   enum ttm_caching_state cstate,
-		   dma_addr_t *dma_address,
-		   struct device *dev);
+		   dma_addr_t *dma_address);
 /**
  * Initialize pool allocator.
  */