Merge branch 'vmwgfx-next-3.13' of git://people.freedesktop.org/~thomash/linux into drm-next
A resource eviction fix, and a fix for compilation / sparse problems
from the previous pull.

* 'vmwgfx-next-3.13' of git://people.freedesktop.org/~thomash/linux:
  drm/vmwgfx: Fix a couple of compile / sparse warnings and errors
  drm/vmwgfx: Resource evict fixes
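For context, the sparse warnings mentioned above come from the kernel's optional static checker; an illustrative way to surface them for just this driver (assuming a configured tree) is:

    make C=2 drivers/gpu/drm/vmwgfx/

where C=2 asks the build to run sparse on every source file it processes, not only the ones being recompiled.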
@@ -453,12 +453,13 @@ static void vmw_get_initial_size(struct vmw_private *dev_priv)
  */
 static int vmw_dma_select_mode(struct vmw_private *dev_priv)
 {
-	const struct dma_map_ops *dma_ops = get_dma_ops(dev_priv->dev->dev);
 	static const char *names[vmw_dma_map_max] = {
 		[vmw_dma_phys] = "Using physical TTM page addresses.",
 		[vmw_dma_alloc_coherent] = "Using coherent TTM pages.",
 		[vmw_dma_map_populate] = "Keeping DMA mappings.",
 		[vmw_dma_map_bind] = "Giving up DMA mappings early."};
+#ifdef CONFIG_X86
+	const struct dma_map_ops *dma_ops = get_dma_ops(dev_priv->dev->dev);
 
 #ifdef CONFIG_INTEL_IOMMU
 	if (intel_iommu_enabled) {
@@ -500,6 +501,10 @@ out_fixup:
 		return -EINVAL;
 #endif
 
+#else /* CONFIG_X86 */
+	dev_priv->map_mode = vmw_dma_map_populate;
+#endif /* CONFIG_X86 */
+
 	DRM_INFO("DMA map mode: %s\n", names[dev_priv->map_mode]);
 
 	return 0;
@@ -145,7 +145,9 @@ static void vmw_gmr_free_descriptors(struct device *dev, dma_addr_t desc_dma,
 	}
 
 	page_virtual = kmap_atomic(page);
-	desc_dma = page_virtual[desc_per_page].ppn << PAGE_SHIFT;
+	desc_dma = (dma_addr_t)
+		le32_to_cpu(page_virtual[desc_per_page].ppn) <<
+		PAGE_SHIFT;
 	kunmap_atomic(page_virtual);
 
 	__free_page(page);
@@ -217,7 +219,8 @@ static int vmw_gmr_build_descriptors(struct device *dev,
 	desc_dma = 0;
 	list_for_each_entry_reverse(page, desc_pages, lru) {
 		page_virtual = kmap_atomic(page);
-		page_virtual[desc_per_page].ppn = desc_dma >> PAGE_SHIFT;
+		page_virtual[desc_per_page].ppn = cpu_to_le32
+			(desc_dma >> PAGE_SHIFT);
 		kunmap_atomic(page_virtual);
 		desc_dma = dma_map_page(dev, page, 0, PAGE_SIZE,
 					DMA_TO_DEVICE);
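The two GMR hunks above are the sparse-related part of the pull: the descriptor's ppn field is device-visible and little-endian, so every access has to go through le32_to_cpu()/cpu_to_le32(), and the loaded value must be widened before the shift. A minimal sketch of that pattern, with a hypothetical example_desc struct standing in for the real descriptor:

#include <linux/types.h>
#include <linux/mm.h>		/* PAGE_SHIFT */
#include <asm/byteorder.h>	/* cpu_to_le32(), le32_to_cpu() */

struct example_desc {
	__le32 ppn;	/* device-visible page number, stored little-endian */
};

/* CPU -> device: convert before storing into the shared field. */
static void example_store_next(struct example_desc *desc, dma_addr_t next)
{
	desc->ppn = cpu_to_le32(next >> PAGE_SHIFT);
}

/* Device -> CPU: convert, then widen to dma_addr_t before shifting back. */
static dma_addr_t example_load_next(const struct example_desc *desc)
{
	return (dma_addr_t)le32_to_cpu(desc->ppn) << PAGE_SHIFT;
}

With the field declared __le32, sparse flags any read or write that skips the conversion helpers, which is exactly the class of warning the hunks above silence.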
@@ -32,6 +32,8 @@
 #include <drm/drmP.h>
 #include "vmwgfx_resource_priv.h"
 
+#define VMW_RES_EVICT_ERR_COUNT 10
+
 struct vmw_user_dma_buffer {
 	struct ttm_base_object base;
 	struct vmw_dma_buffer dma;
@@ -1091,8 +1093,9 @@ vmw_resource_backoff_reservation(struct ww_acquire_ctx *ticket,
  * to a backup buffer.
  *
  * @res:            The resource to evict.
+ * @interruptible:  Whether to wait interruptible.
  */
-int vmw_resource_do_evict(struct vmw_resource *res)
+int vmw_resource_do_evict(struct vmw_resource *res, bool interruptible)
 {
 	struct ttm_validate_buffer val_buf;
 	const struct vmw_res_func *func = res->func;
@@ -1102,7 +1105,8 @@ int vmw_resource_do_evict(struct vmw_resource *res)
 	BUG_ON(!func->may_evict);
 
 	val_buf.bo = NULL;
-	ret = vmw_resource_check_buffer(res, &ticket, true, &val_buf);
+	ret = vmw_resource_check_buffer(res, &ticket, interruptible,
+					&val_buf);
 	if (unlikely(ret != 0))
 		return ret;
@@ -1141,6 +1145,7 @@ int vmw_resource_validate(struct vmw_resource *res)
 	struct vmw_private *dev_priv = res->dev_priv;
 	struct list_head *lru_list = &dev_priv->res_lru[res->func->res_type];
 	struct ttm_validate_buffer val_buf;
+	unsigned err_count = 0;
 
 	if (likely(!res->func->may_evict))
 		return 0;
@@ -1155,7 +1160,7 @@ int vmw_resource_validate(struct vmw_resource *res)
 
 		write_lock(&dev_priv->resource_lock);
 		if (list_empty(lru_list) || !res->func->may_evict) {
-			DRM_ERROR("Out of device device id entries "
+			DRM_ERROR("Out of device device resources "
 				  "for %s.\n", res->func->type_name);
 			ret = -EBUSY;
 			write_unlock(&dev_priv->resource_lock);
@@ -1168,7 +1173,19 @@ int vmw_resource_validate(struct vmw_resource *res)
 		list_del_init(&evict_res->lru_head);
 
 		write_unlock(&dev_priv->resource_lock);
-		vmw_resource_do_evict(evict_res);
+
+		ret = vmw_resource_do_evict(evict_res, true);
+		if (unlikely(ret != 0)) {
+			write_lock(&dev_priv->resource_lock);
+			list_add_tail(&evict_res->lru_head, lru_list);
+			write_unlock(&dev_priv->resource_lock);
+			if (ret == -ERESTARTSYS ||
+			    ++err_count > VMW_RES_EVICT_ERR_COUNT) {
+				vmw_resource_unreference(&evict_res);
+				goto out_no_validate;
+			}
+		}
+
 		vmw_resource_unreference(&evict_res);
 	} while (1);
@@ -1253,13 +1270,15 @@ bool vmw_resource_needs_backup(const struct vmw_resource *res)
  * @type:           The resource type to evict
  *
  * To avoid thrashing starvation or as part of the hibernation sequence,
- * evict all evictable resources of a specific type.
+ * try to evict all evictable resources of a specific type.
  */
 static void vmw_resource_evict_type(struct vmw_private *dev_priv,
 				    enum vmw_res_type type)
 {
 	struct list_head *lru_list = &dev_priv->res_lru[type];
 	struct vmw_resource *evict_res;
+	unsigned err_count = 0;
+	int ret;
 
 	do {
 		write_lock(&dev_priv->resource_lock);
@@ -1272,7 +1291,18 @@ static void vmw_resource_evict_type(struct vmw_private *dev_priv,
 					      lru_head));
 		list_del_init(&evict_res->lru_head);
 		write_unlock(&dev_priv->resource_lock);
-		vmw_resource_do_evict(evict_res);
+
+		ret = vmw_resource_do_evict(evict_res, false);
+		if (unlikely(ret != 0)) {
+			write_lock(&dev_priv->resource_lock);
+			list_add_tail(&evict_res->lru_head, lru_list);
+			write_unlock(&dev_priv->resource_lock);
+			if (++err_count > VMW_RES_EVICT_ERR_COUNT) {
+				vmw_resource_unreference(&evict_res);
+				return;
+			}
+		}
+
 		vmw_resource_unreference(&evict_res);
 	} while (1);
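Both eviction loops above now follow the same recovery policy: a failed eviction puts the resource back on the LRU list, and the loop only gives up on a signal (in the interruptible case) or after VMW_RES_EVICT_ERR_COUNT consecutive failures, instead of silently ignoring the error. A stand-alone sketch of that policy, using hypothetical names (struct item, try_evict(), MAX_EVICT_ERRORS) and a plain spinlock where the driver uses its resource_lock rwlock:

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/errno.h>

#define MAX_EVICT_ERRORS 10	/* plays the role of VMW_RES_EVICT_ERR_COUNT */

struct item {
	struct list_head lru;
};

/* Assumed helper: tries to evict one item, returns 0 or a negative errno. */
int try_evict(struct item *it, bool interruptible);

int evict_all(struct list_head *lru_list, spinlock_t *lock)
{
	unsigned int err_count = 0;
	struct item *it;
	int ret;

	do {
		spin_lock(lock);
		if (list_empty(lru_list)) {
			spin_unlock(lock);
			return 0;
		}
		it = list_first_entry(lru_list, struct item, lru);
		list_del_init(&it->lru);
		spin_unlock(lock);

		ret = try_evict(it, true);
		if (ret != 0) {
			/* Failed: put the entry back so it is not lost... */
			spin_lock(lock);
			list_add_tail(&it->lru, lru_list);
			spin_unlock(lock);
			/* ...and bail out on a signal or repeated errors. */
			if (ret == -ERESTARTSYS ||
			    ++err_count > MAX_EVICT_ERRORS)
				return ret;
		}
	} while (1);
}

The cap on consecutive errors is what keeps the non-interruptible caller (vmw_resource_evict_type, used during hibernation) from spinning forever on a resource that can never be evicted.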