Merge branch 'drm-ttm-unmappable' into drm-core-next
* drm-ttm-unmappable:
  drm/radeon/kms: enable use of unmappable VRAM V2
  drm/ttm: remove io_ field from TTM V6
  drm/vmwgfx: add support for new TTM fault callback V5
  drm/nouveau/kms: add support for new TTM fault callback V5
  drm/radeon/kms: add support for new fault callback V7
  drm/ttm: ttm_fault callback to allow driver to handle bo placement V6
  drm/ttm: split no_wait argument in 2 GPU or reserve wait

Conflicts:
	drivers/gpu/drm/nouveau/nouveau_bo.c
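For orientation before the diff: the series drops the per-memory-type TTM_MEMTYPE_FLAG_NEEDS_IOREMAP/io_addr scheme in favour of bus-placement state carried on each ttm_mem_reg and filled in by a new driver callback. Below is a minimal sketch of that per-region state, reconstructed from the mem->bus.* accesses in the hunks; the struct name and member types here are assumptions, not copied from the merged header.

/* Sketch only: field names come from the diff below, the struct name
 * and member types are assumed for illustration. */
struct ttm_bus_placement {
	void		*addr;		/* kernel virtual address if the aperture is pre-mapped */
	unsigned long	base;		/* bus base of the aperture backing this region */
	unsigned long	offset;		/* placement of the buffer inside the aperture */
	unsigned long	size;		/* size of the reserved window */
	bool		is_iomem;	/* false: system pages, kmap through ttm_tt instead */
	bool		io_reserved;	/* the driver's io_mem_reserve() has run for this region */
};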
drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -50,7 +50,8 @@ void ttm_bo_free_old_node(struct ttm_buffer_object *bo)
 }
 
 int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
-		    bool evict, bool no_wait, struct ttm_mem_reg *new_mem)
+		    bool evict, bool no_wait_reserve,
+		    bool no_wait_gpu, struct ttm_mem_reg *new_mem)
 {
 	struct ttm_tt *ttm = bo->ttm;
 	struct ttm_mem_reg *old_mem = &bo->mem;
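This hunk is the signature half of the "split no_wait argument" patch: waiting for the buffer reservation and waiting for the GPU become independent flags on every move path. A hedged sketch of how a call site changes (variable names assumed):

/* Before the split a caller passed a single flag:
 *	ret = ttm_bo_move_ttm(bo, evict, no_wait, new_mem);
 * afterwards the two waits are decided separately: */
ret = ttm_bo_move_ttm(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);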
@@ -81,30 +82,51 @@ int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
 }
 EXPORT_SYMBOL(ttm_bo_move_ttm);
 
+int ttm_mem_io_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
+{
+	int ret;
+
+	if (!mem->bus.io_reserved) {
+		mem->bus.io_reserved = true;
+		ret = bdev->driver->io_mem_reserve(bdev, mem);
+		if (unlikely(ret != 0))
+			return ret;
+	}
+	return 0;
+}
+
+void ttm_mem_io_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
+{
+	if (bdev->driver->io_mem_reserve) {
+		if (mem->bus.io_reserved) {
+			mem->bus.io_reserved = false;
+			bdev->driver->io_mem_free(bdev, mem);
+		}
+	}
+}
+
 int ttm_mem_reg_ioremap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
 			void **virtual)
 {
-	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
-	unsigned long bus_offset;
-	unsigned long bus_size;
-	unsigned long bus_base;
 	int ret;
 	void *addr;
 
 	*virtual = NULL;
-	ret = ttm_bo_pci_offset(bdev, mem, &bus_base, &bus_offset, &bus_size);
-	if (ret || bus_size == 0)
+	ret = ttm_mem_io_reserve(bdev, mem);
+	if (ret)
 		return ret;
 
-	if (!(man->flags & TTM_MEMTYPE_FLAG_NEEDS_IOREMAP))
-		addr = (void *)(((u8 *) man->io_addr) + bus_offset);
-	else {
+	if (mem->bus.addr) {
+		addr = mem->bus.addr;
+	} else {
 		if (mem->placement & TTM_PL_FLAG_WC)
-			addr = ioremap_wc(bus_base + bus_offset, bus_size);
+			addr = ioremap_wc(mem->bus.base + mem->bus.offset, mem->bus.size);
 		else
-			addr = ioremap_nocache(bus_base + bus_offset, bus_size);
-		if (!addr)
+			addr = ioremap_nocache(mem->bus.base + mem->bus.offset, mem->bus.size);
+		if (!addr) {
+			ttm_mem_io_free(bdev, mem);
 			return -ENOMEM;
+		}
 	}
 	*virtual = addr;
 	return 0;
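ttm_mem_io_reserve()/ttm_mem_io_free() above are thin dispatchers into the driver's new io_mem_reserve/io_mem_free hooks, which are what actually fill in mem->bus. A hedged sketch of a driver-side implementation, loosely modeled on what a VRAM-backed driver would do; the example_* names and the aperture helper are assumptions, and the snippet relies on the usual TTM headers:

static int example_io_mem_reserve(struct ttm_bo_device *bdev,
				  struct ttm_mem_reg *mem)
{
	mem->bus.addr = NULL;
	mem->bus.offset = 0;
	mem->bus.size = mem->num_pages << PAGE_SHIFT;
	mem->bus.base = 0;
	mem->bus.is_iomem = false;

	switch (mem->mem_type) {
	case TTM_PL_SYSTEM:
		return 0;	/* system RAM: nothing to map over the bus */
	case TTM_PL_VRAM:
		/* place the window at the buffer's offset inside the BAR */
		mem->bus.offset = mem->mm_node->start << PAGE_SHIFT;
		mem->bus.base = example_vram_aperture_base(bdev);	/* assumed helper */
		mem->bus.is_iomem = true;
		return 0;
	default:
		return -EINVAL;
	}
}

static void example_io_mem_free(struct ttm_bo_device *bdev,
				struct ttm_mem_reg *mem)
{
	/* nothing was reserved above, so nothing to release */
}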
@@ -117,8 +139,9 @@ void ttm_mem_reg_iounmap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
 
 	man = &bdev->man[mem->mem_type];
 
-	if (virtual && (man->flags & TTM_MEMTYPE_FLAG_NEEDS_IOREMAP))
+	if (virtual && mem->bus.addr == NULL)
 		iounmap(virtual);
+	ttm_mem_io_free(bdev, mem);
 }
 
 static int ttm_copy_io_page(void *dst, void *src, unsigned long page)
@@ -208,7 +231,8 @@ static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst,
 }
 
 int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
-		       bool evict, bool no_wait, struct ttm_mem_reg *new_mem)
+		       bool evict, bool no_wait_reserve, bool no_wait_gpu,
+		       struct ttm_mem_reg *new_mem)
 {
 	struct ttm_bo_device *bdev = bo->bdev;
 	struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
@@ -369,26 +393,23 @@ pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp)
 EXPORT_SYMBOL(ttm_io_prot);
 
 static int ttm_bo_ioremap(struct ttm_buffer_object *bo,
-			  unsigned long bus_base,
-			  unsigned long bus_offset,
-			  unsigned long bus_size,
+			  unsigned long offset,
+			  unsigned long size,
 			  struct ttm_bo_kmap_obj *map)
 {
-	struct ttm_bo_device *bdev = bo->bdev;
 	struct ttm_mem_reg *mem = &bo->mem;
-	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
 
-	if (!(man->flags & TTM_MEMTYPE_FLAG_NEEDS_IOREMAP)) {
+	if (bo->mem.bus.addr) {
 		map->bo_kmap_type = ttm_bo_map_premapped;
-		map->virtual = (void *)(((u8 *) man->io_addr) + bus_offset);
+		map->virtual = (void *)(((u8 *)bo->mem.bus.addr) + offset);
 	} else {
 		map->bo_kmap_type = ttm_bo_map_iomap;
 		if (mem->placement & TTM_PL_FLAG_WC)
-			map->virtual = ioremap_wc(bus_base + bus_offset,
-						  bus_size);
+			map->virtual = ioremap_wc(bo->mem.bus.base + bo->mem.bus.offset + offset,
+						  size);
 		else
-			map->virtual = ioremap_nocache(bus_base + bus_offset,
-						       bus_size);
+			map->virtual = ioremap_nocache(bo->mem.bus.base + bo->mem.bus.offset + offset,
+						       size);
 	}
 	return (!map->virtual) ? -ENOMEM : 0;
 }
@@ -441,13 +462,12 @@ int ttm_bo_kmap(struct ttm_buffer_object *bo,
 		unsigned long start_page, unsigned long num_pages,
 		struct ttm_bo_kmap_obj *map)
 {
+	unsigned long offset, size;
 	int ret;
-	unsigned long bus_base;
-	unsigned long bus_offset;
-	unsigned long bus_size;
 
 	BUG_ON(!list_empty(&bo->swap));
 	map->virtual = NULL;
+	map->bo = bo;
 	if (num_pages > bo->num_pages)
 		return -EINVAL;
 	if (start_page > bo->num_pages)
@@ -456,16 +476,15 @@ int ttm_bo_kmap(struct ttm_buffer_object *bo,
 	if (num_pages > 1 && !DRM_SUSER(DRM_CURPROC))
 		return -EPERM;
 #endif
-	ret = ttm_bo_pci_offset(bo->bdev, &bo->mem, &bus_base,
-				&bus_offset, &bus_size);
+	ret = ttm_mem_io_reserve(bo->bdev, &bo->mem);
 	if (ret)
 		return ret;
-	if (bus_size == 0) {
+	if (!bo->mem.bus.is_iomem) {
 		return ttm_bo_kmap_ttm(bo, start_page, num_pages, map);
 	} else {
-		bus_offset += start_page << PAGE_SHIFT;
-		bus_size = num_pages << PAGE_SHIFT;
-		return ttm_bo_ioremap(bo, bus_base, bus_offset, bus_size, map);
+		offset = start_page << PAGE_SHIFT;
+		size = num_pages << PAGE_SHIFT;
+		return ttm_bo_ioremap(bo, offset, size, map);
 	}
 }
 EXPORT_SYMBOL(ttm_bo_kmap);
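With the two kmap hunks applied, ttm_bo_kmap() reserves the io space itself and maps only the requested window: offset = start_page << PAGE_SHIFT and size = num_pages << PAGE_SHIFT, so the ioremap covers bus.base + bus.offset + offset for size bytes rather than the whole region. A hedged usage sketch (function name assumed, error handling trimmed):

/* Map the first page of a BO, poke a value, unmap.  ttm_bo_kunmap()
 * drops the io reservation via ttm_mem_io_free(), as added below. */
static int example_write_u32(struct ttm_buffer_object *bo, u32 value)
{
	struct ttm_bo_kmap_obj map;
	bool is_iomem;
	int ret;

	ret = ttm_bo_kmap(bo, 0, 1, &map);
	if (ret)
		return ret;
	*(u32 *)ttm_kmap_obj_virtual(&map, &is_iomem) = value;
	ttm_bo_kunmap(&map);
	return 0;
}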
@@ -477,6 +496,7 @@ void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map)
 	switch (map->bo_kmap_type) {
 	case ttm_bo_map_iomap:
 		iounmap(map->virtual);
+		ttm_mem_io_free(map->bo->bdev, &map->bo->mem);
 		break;
 	case ttm_bo_map_vmap:
 		vunmap(map->virtual);
@@ -494,39 +514,11 @@ void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map)
 }
 EXPORT_SYMBOL(ttm_bo_kunmap);
 
-int ttm_bo_pfn_prot(struct ttm_buffer_object *bo,
-		    unsigned long dst_offset,
-		    unsigned long *pfn, pgprot_t *prot)
-{
-	struct ttm_mem_reg *mem = &bo->mem;
-	struct ttm_bo_device *bdev = bo->bdev;
-	unsigned long bus_offset;
-	unsigned long bus_size;
-	unsigned long bus_base;
-	int ret;
-	ret = ttm_bo_pci_offset(bdev, mem, &bus_base, &bus_offset,
-			&bus_size);
-	if (ret)
-		return -EINVAL;
-	if (bus_size != 0)
-		*pfn = (bus_base + bus_offset + dst_offset) >> PAGE_SHIFT;
-	else
-		if (!bo->ttm)
-			return -EINVAL;
-		else
-			*pfn = page_to_pfn(ttm_tt_get_page(bo->ttm,
-							   dst_offset >>
-							   PAGE_SHIFT));
-	*prot = (mem->placement & TTM_PL_FLAG_CACHED) ?
-		PAGE_KERNEL : ttm_io_prot(mem->placement, PAGE_KERNEL);
-
-	return 0;
-}
-
 int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
 			      void *sync_obj,
 			      void *sync_obj_arg,
-			      bool evict, bool no_wait,
+			      bool evict, bool no_wait_reserve,
+			      bool no_wait_gpu,
 			      struct ttm_mem_reg *new_mem)
 {
 	struct ttm_bo_device *bdev = bo->bdev;