drm/nouveau/core: have fifo store a unique context identifier at attach time
This value will match something that is easily available to the engine IRQ handlers and can be used to look up the relevant context. Since the per-generation changes in how this value is derived line up with the major PFIFO redesigns, fifo is responsible for calculating the correct value, which avoids duplicating the same code across many engine modules.

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
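For illustration only, here is a minimal sketch of the kind of lookup this enables on the engine side. It is not part of this commit; the helper name engctx_lookup and the header paths are assumptions. An engine IRQ handler would read the generation-specific identifier from its status registers (the channel ID on nv04-nv3x, an instance address shifted right by 4 on nv40, or by 12 on nv50 and later, matching what the hunks below store) and walk engine->contexts for the engctx whose addr matches:

/*
 * Illustrative sketch only (not part of this commit): how an engine
 * IRQ handler might resolve the identifier it reads from hardware back
 * to a context, now that fifo stores a matching value in
 * nv_engctx(engctx)->addr at attach time.  The helper name and header
 * paths are assumptions; real code would also need to take a reference
 * on the engctx before dropping the lock.
 */
#include <core/engine.h>        /* struct nouveau_engine */
#include <core/engctx.h>        /* struct nouveau_engctx, nv_engctx() */

static struct nouveau_engctx *
engctx_lookup(struct nouveau_engine *engine, u64 inst)
{
        struct nouveau_engctx *engctx, *match = NULL;
        unsigned long flags;

        spin_lock_irqsave(&engine->lock, flags);
        list_for_each_entry(engctx, &engine->contexts, head) {
                if (engctx->addr == inst) {     /* value fifo stored at attach */
                        match = engctx;
                        break;
                }
        }
        spin_unlock_irqrestore(&engine->lock, flags);
        return match;
}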
@@ -105,6 +105,7 @@ nouveau_engctx_create_(struct nouveau_object *parent,
         if (client->vm)
                 atomic_inc(&client->vm->engref[nv_engidx(engobj)]);
         list_add(&nv_engctx(engctx)->head, &engine->contexts);
+        nv_engctx(engctx)->addr = ~0ULL;
         spin_unlock_irqrestore(&engine->lock, save);
         return 0;
 }
@@ -102,6 +102,14 @@ nv04_fifo_object_detach(struct nouveau_object *parent, int cookie)
         mutex_unlock(&nv_subdev(priv)->mutex);
 }
 
+int
+nv04_fifo_context_attach(struct nouveau_object *parent,
+                         struct nouveau_object *object)
+{
+        nv_engctx(object)->addr = nouveau_fifo_chan(parent)->chid;
+        return 0;
+}
+
 static int
 nv04_fifo_chan_ctor(struct nouveau_object *parent,
                     struct nouveau_object *engine,
@@ -127,6 +135,7 @@ nv04_fifo_chan_ctor(struct nouveau_object *parent,
 
         nv_parent(chan)->object_attach = nv04_fifo_object_attach;
         nv_parent(chan)->object_detach = nv04_fifo_object_detach;
+        nv_parent(chan)->context_attach = nv04_fifo_context_attach;
         chan->ramfc = chan->base.chid * 32;
 
         nv_wo32(priv->ramfc, chan->ramfc + 0x00, args->offset);
@@ -78,6 +78,7 @@ nv10_fifo_chan_ctor(struct nouveau_object *parent,
 
         nv_parent(chan)->object_attach = nv04_fifo_object_attach;
         nv_parent(chan)->object_detach = nv04_fifo_object_detach;
+        nv_parent(chan)->context_attach = nv04_fifo_context_attach;
         chan->ramfc = chan->base.chid * 32;
 
         nv_wo32(priv->ramfc, chan->ramfc + 0x00, args->offset);
@@ -85,6 +85,7 @@ nv17_fifo_chan_ctor(struct nouveau_object *parent,
 
         nv_parent(chan)->object_attach = nv04_fifo_object_attach;
         nv_parent(chan)->object_detach = nv04_fifo_object_detach;
+        nv_parent(chan)->context_attach = nv04_fifo_context_attach;
         chan->ramfc = chan->base.chid * 64;
 
         nv_wo32(priv->ramfc, chan->ramfc + 0x00, args->offset);
@@ -128,11 +128,12 @@ nv40_fifo_context_attach(struct nouveau_object *parent,
         }
 
         spin_lock_irqsave(&priv->base.lock, flags);
+        nv_engctx(engctx)->addr = nv_gpuobj(engctx)->addr >> 4;
         nv_mask(priv, 0x002500, 0x00000001, 0x00000000);
 
         if ((nv_rd32(priv, 0x003204) & priv->base.max) == chan->base.chid)
-                nv_wr32(priv, reg, nv_gpuobj(engctx)->addr >> 4);
-        nv_wo32(priv->ramfc, chan->ramfc + ctx, nv_gpuobj(engctx)->addr >> 4);
+                nv_wr32(priv, reg, nv_engctx(engctx)->addr);
+        nv_wo32(priv->ramfc, chan->ramfc + ctx, nv_engctx(engctx)->addr);
 
         nv_mask(priv, 0x002500, 0x00000001, 0x00000001);
         spin_unlock_irqrestore(&priv->base.lock, flags);
@@ -81,6 +81,7 @@ nv50_fifo_context_attach(struct nouveau_object *parent,
                 return -EINVAL;
         }
 
+        nv_engctx(ectx)->addr = nv_gpuobj(base)->addr >> 12;
         nv_wo32(base->eng, addr + 0x00, 0x00190000);
         nv_wo32(base->eng, addr + 0x04, lower_32_bits(limit));
         nv_wo32(base->eng, addr + 0x08, lower_32_bits(start));
@@ -62,6 +62,7 @@ nv84_fifo_context_attach(struct nouveau_object *parent,
                 return -EINVAL;
         }
 
+        nv_engctx(ectx)->addr = nv_gpuobj(base)->addr >> 12;
         nv_wo32(base->eng, addr + 0x00, 0x00190000);
         nv_wo32(base->eng, addr + 0x04, lower_32_bits(limit));
         nv_wo32(base->eng, addr + 0x08, lower_32_bits(start));
@@ -112,6 +112,8 @@ nvc0_fifo_context_attach(struct nouveau_object *parent,
                                             NV_MEM_ACCESS_RW, &ectx->vma);
                 if (ret)
                         return ret;
+
+                nv_engctx(ectx)->addr = nv_gpuobj(base)->addr >> 12;
         }
 
         nv_wo32(base, addr + 0x00, lower_32_bits(ectx->vma.offset) | 4);
@@ -147,6 +147,8 @@ nve0_fifo_context_attach(struct nouveau_object *parent,
                                             NV_MEM_ACCESS_RW, &ectx->vma);
                 if (ret)
                         return ret;
+
+                nv_engctx(ectx)->addr = nv_gpuobj(base)->addr >> 12;
         }
 
         nv_wo32(base, addr + 0x00, lower_32_bits(ectx->vma.offset) | 4);
@@ -13,6 +13,7 @@ struct nouveau_engctx {
         struct nouveau_gpuobj base;
         struct nouveau_vma vma;
         struct list_head head;
+        u64 addr;
 };
 
 static inline struct nouveau_engctx *
@@ -106,5 +106,6 @@ extern struct nouveau_oclass nvc0_fifo_oclass;
 extern struct nouveau_oclass nve0_fifo_oclass;
 
 void nv04_fifo_intr(struct nouveau_subdev *);
+int nv04_fifo_context_attach(struct nouveau_object *, struct nouveau_object *);
 
 #endif