From: Thomas Hellstrom <[email protected]>

We're gradually moving towards using DMA coherent memory in most
situations, although TTM's interactions with the DMA layers are still a
work in progress. Meanwhile, use coherent memory when the DMA layer
reports a mapping-size restriction, since in that case streaming DMA
mappings of large buffer objects may fail.

Also move the DMA mask setup into vmw_dma_select_mode(), since it's
important that the correct DMA masks are set before
dma_max_mapping_size() is called.
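
For reference, a minimal sketch of the check this boils down to; the
helper name below is hypothetical and not part of the patch:

#include <linux/dma-mapping.h>

static bool vmw_needs_coherent_dma(struct device *dev)
{
        /* Set the widest masks first; failure is ignored, as in the patch. */
        (void) dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));

        /*
         * dma_max_mapping_size() takes the masks just set into account.
         * Anything other than SIZE_MAX means streaming DMA mappings of
         * large buffer objects may fail, so fall back to coherent memory.
         */
        return dma_max_mapping_size(dev) != SIZE_MAX;
}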

Cc: Christoph Hellwig <[email protected]>
Signed-off-by: Thomas Hellstrom <[email protected]>
Reviewed-by: Brian Paul <[email protected]>
---
 drivers/gpu/drm/vmwgfx/vmwgfx_drv.c | 31 +++++++----------------------
 1 file changed, 7 insertions(+), 24 deletions(-)

diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index fc0283659c41..1e1de83908fe 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -569,7 +569,10 @@ static int vmw_dma_select_mode(struct vmw_private *dev_priv)
                [vmw_dma_map_populate] = "Caching DMA mappings.",
                [vmw_dma_map_bind] = "Giving up DMA mappings early."};
 
-       if (vmw_force_coherent)
+       (void) dma_set_mask_and_coherent(dev_priv->dev->dev, DMA_BIT_MASK(64));
+
+       if (vmw_force_coherent ||
+           dma_max_mapping_size(dev_priv->dev->dev) != SIZE_MAX)
                dev_priv->map_mode = vmw_dma_alloc_coherent;
        else if (vmw_restrict_iommu)
                dev_priv->map_mode = vmw_dma_map_bind;
@@ -582,30 +585,15 @@ static int vmw_dma_select_mode(struct vmw_private *dev_priv)
                return -EINVAL;
 
        DRM_INFO("DMA map mode: %s\n", names[dev_priv->map_mode]);
-       return 0;
-}
 
-/**
- * vmw_dma_masks - set required page- and dma masks
- *
- * @dev: Pointer to struct drm-device
- *
- * With 32-bit we can only handle 32 bit PFNs. Optionally set that
- * restriction also for 64-bit systems.
- */
-static int vmw_dma_masks(struct vmw_private *dev_priv)
-{
-       struct drm_device *dev = dev_priv->dev;
-       int ret = 0;
-
-       ret = dma_set_mask_and_coherent(dev->dev, DMA_BIT_MASK(64));
        if (dev_priv->map_mode != vmw_dma_phys &&
            (sizeof(unsigned long) == 4 || vmw_restrict_dma_mask)) {
                DRM_INFO("Restricting DMA addresses to 44 bits.\n");
-               return dma_set_mask_and_coherent(dev->dev, DMA_BIT_MASK(44));
+               return dma_set_mask_and_coherent(dev_priv->dev->dev,
+                                                DMA_BIT_MASK(44));
        }
 
-       return ret;
+       return 0;
 }
 
 static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
@@ -674,7 +662,6 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
                dev_priv->capabilities2 = vmw_read(dev_priv, SVGA_REG_CAP2);
        }
 
-
        ret = vmw_dma_select_mode(dev_priv);
        if (unlikely(ret != 0)) {
                DRM_INFO("Restricting capabilities due to IOMMU setup.\n");
@@ -746,10 +733,6 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
        if (dev_priv->capabilities & SVGA_CAP_CAP2_REGISTER)
                vmw_print_capabilities2(dev_priv->capabilities2);
 
-       ret = vmw_dma_masks(dev_priv);
-       if (unlikely(ret != 0))
-               goto out_err0;
-
        dma_set_max_seg_size(dev->dev, min_t(unsigned int, U32_MAX & PAGE_MASK,
                                             SCATTERLIST_MAX_SEGMENT));
 
-- 
2.21.0
