Commit ef7bb281 authored by Christoph Bumiller

nv50,nvc0: handle user index buffers

parent fcb28682
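In short, both drivers now accept an index buffer that is only a user-space pointer: every path that previously assumed a GPU resource (bind, validate, the push/inline draw paths) branches on whether idxbuf.buffer is set and falls back to idxbuf.user_buffer otherwise. A minimal sketch of that pattern, using the same helpers the hunks below call (the resolve_idxbuf wrapper itself is illustrative, not something this commit adds):

#include "pipe/p_state.h"      /* struct pipe_index_buffer */
#include "nouveau_context.h"   /* struct nouveau_context */
#include "nouveau_buffer.h"    /* nv04_resource(), nouveau_resource_map_offset() */

/* Illustrative only: resolve the bound index buffer to a CPU-readable
 * pointer, whether it is a real GPU resource or a user-supplied array. */
static const void *
resolve_idxbuf(struct nouveau_context *base,
               const struct pipe_index_buffer *idxbuf)
{
   if (idxbuf->buffer) {
      /* GPU resource: map it at the bound offset for CPU reads. */
      return nouveau_resource_map_offset(base, nv04_resource(idxbuf->buffer),
                                         idxbuf->offset, NOUVEAU_BO_RD);
   }
   /* User index buffer: use the state tracker's pointer directly. */
   return idxbuf->user_buffer;
}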
......@@ -13,7 +13,7 @@
 struct push_context {
    struct nouveau_pushbuf *push;
-   void *idxbuf;
+   const void *idxbuf;
    float edgeflag;
    int edgeflag_attr;
......@@ -234,9 +234,13 @@ nv50_push_vbo(struct nv50_context *nv50, const struct pipe_draw_info *info)
    }
    if (info->indexed) {
-      ctx.idxbuf = nouveau_resource_map_offset(&nv50->base,
-                                               nv04_resource(nv50->idxbuf.buffer),
-                                               nv50->idxbuf.offset, NOUVEAU_BO_RD);
+      if (nv50->idxbuf.buffer) {
+         ctx.idxbuf = nouveau_resource_map_offset(&nv50->base,
+            nv04_resource(nv50->idxbuf.buffer), nv50->idxbuf.offset,
+            NOUVEAU_BO_RD);
+      } else {
+         ctx.idxbuf = nv50->idxbuf.user_buffer;
+      }
       if (!ctx.idxbuf)
          return;
       index_size = nv50->idxbuf.index_size;
......
......@@ -152,9 +152,9 @@ nv50_screen_get_param(struct pipe_screen *pscreen, enum pipe_cap param)
    case PIPE_CAP_TGSI_CAN_COMPACT_VARYINGS:
    case PIPE_CAP_TGSI_CAN_COMPACT_CONSTANTS:
    case PIPE_CAP_USER_VERTEX_BUFFERS:
-   case PIPE_CAP_USER_INDEX_BUFFERS:
       return 0; /* state trackers will know better */
    case PIPE_CAP_USER_CONSTANT_BUFFERS:
+   case PIPE_CAP_USER_INDEX_BUFFERS:
       return 1;
    case PIPE_CAP_CONSTANT_BUFFER_OFFSET_ALIGNMENT:
       return 256;
......
......@@ -911,12 +911,15 @@ nv50_set_index_buffer(struct pipe_context *pipe,
    if (nv50->idxbuf.buffer)
       nouveau_bufctx_reset(nv50->bufctx_3d, NV50_BIND_INDEX);
-   if (ib && ib->buffer) {
+   if (ib) {
       pipe_resource_reference(&nv50->idxbuf.buffer, ib->buffer);
-      nv50->idxbuf.offset = ib->offset;
       nv50->idxbuf.index_size = ib->index_size;
-      if (nouveau_resource_mapped_by_gpu(ib->buffer))
+      if (ib->buffer) {
+         nv50->idxbuf.offset = ib->offset;
          BCTX_REFN(nv50->bufctx_3d, INDEX, nv04_resource(ib->buffer), RD);
+      } else {
+         nv50->idxbuf.user_buffer = ib->user_buffer;
+      }
    } else {
       pipe_resource_reference(&nv50->idxbuf.buffer, NULL);
    }
......
......@@ -454,7 +454,7 @@ nv50_draw_arrays(struct nv50_context *nv50,
 }
 static void
-nv50_draw_elements_inline_u08(struct nouveau_pushbuf *push, uint8_t *map,
+nv50_draw_elements_inline_u08(struct nouveau_pushbuf *push, const uint8_t *map,
                               unsigned start, unsigned count)
 {
    map += start;
......@@ -480,7 +480,7 @@ nv50_draw_elements_inline_u08(struct nouveau_pushbuf *push, uint8_t *map,
 }
 static void
-nv50_draw_elements_inline_u16(struct nouveau_pushbuf *push, uint16_t *map,
+nv50_draw_elements_inline_u16(struct nouveau_pushbuf *push, const uint16_t *map,
                               unsigned start, unsigned count)
 {
    map += start;
......@@ -503,7 +503,7 @@ nv50_draw_elements_inline_u16(struct nouveau_pushbuf *push, uint16_t *map,
 }
 static void
-nv50_draw_elements_inline_u32(struct nouveau_pushbuf *push, uint32_t *map,
+nv50_draw_elements_inline_u32(struct nouveau_pushbuf *push, const uint32_t *map,
                               unsigned start, unsigned count)
 {
    map += start;
......@@ -520,7 +520,8 @@ nv50_draw_elements_inline_u32(struct nouveau_pushbuf *push, uint32_t *map,
 }
 static void
-nv50_draw_elements_inline_u32_short(struct nouveau_pushbuf *push, uint32_t *map,
+nv50_draw_elements_inline_u32_short(struct nouveau_pushbuf *push,
+                                    const uint32_t *map,
                                     unsigned start, unsigned count)
 {
    map += start;
......@@ -548,8 +549,6 @@ nv50_draw_elements(struct nv50_context *nv50, boolean shorten,
                    unsigned instance_count, int32_t index_bias)
 {
    struct nouveau_pushbuf *push = nv50->base.pushbuf;
-   void *data;
-   struct nv04_resource *buf = nv04_resource(nv50->idxbuf.buffer);
    unsigned prim;
    const unsigned index_size = nv50->idxbuf.index_size;
......@@ -561,12 +560,13 @@ nv50_draw_elements(struct nv50_context *nv50, boolean shorten,
       nv50->state.index_bias = index_bias;
    }
-   if (nouveau_resource_mapped_by_gpu(nv50->idxbuf.buffer)) {
+   if (nv50->idxbuf.buffer) {
+      struct nv04_resource *buf = nv04_resource(nv50->idxbuf.buffer);
       unsigned pb_start;
       unsigned pb_bytes;
-      const unsigned base = buf->offset;
+      const unsigned base = buf->offset + nv50->idxbuf.offset;
-      start += nv50->idxbuf.offset >> (index_size >> 1);
+      assert(nouveau_resource_mapped_by_gpu(nv50->idxbuf.buffer));
       while (instance_count--) {
          BEGIN_NV04(push, NV50_3D(VERTEX_BEGIN_GL), 1);
......@@ -609,10 +609,7 @@ nv50_draw_elements(struct nv50_context *nv50, boolean shorten,
          prim |= NV50_3D_VERTEX_BEGIN_GL_INSTANCE_NEXT;
       }
    } else {
-      data = nouveau_resource_map_offset(&nv50->base, buf,
-                                         nv50->idxbuf.offset, NOUVEAU_BO_RD);
-      if (!data)
-         return;
+      const void *data = nv50->idxbuf.user_buffer;
       while (instance_count--) {
          BEGIN_NV04(push, NV50_3D(VERTEX_BEGIN_GL), 1);
......@@ -749,8 +746,6 @@ nv50_draw_vbo(struct pipe_context *pipe, const struct pipe_draw_info *info)
    if (info->indexed) {
       boolean shorten = info->max_index <= 65535;
-      assert(nv50->idxbuf.buffer);
       if (info->primitive_restart != nv50->state.prim_restart) {
          if (info->primitive_restart) {
             BEGIN_NV04(push, NV50_3D(PRIM_RESTART_ENABLE), 2);
......
......@@ -140,9 +140,9 @@ nvc0_screen_get_param(struct pipe_screen *pscreen, enum pipe_cap param)
    case PIPE_CAP_TGSI_CAN_COMPACT_VARYINGS:
    case PIPE_CAP_TGSI_CAN_COMPACT_CONSTANTS:
    case PIPE_CAP_USER_VERTEX_BUFFERS:
-   case PIPE_CAP_USER_INDEX_BUFFERS:
       return 0; /* state trackers will know better */
    case PIPE_CAP_USER_CONSTANT_BUFFERS:
+   case PIPE_CAP_USER_INDEX_BUFFERS:
       return 1;
    case PIPE_CAP_CONSTANT_BUFFER_OFFSET_ALIGNMENT:
       return 256;
......
......@@ -802,11 +802,16 @@ nvc0_set_index_buffer(struct pipe_context *pipe,
    if (nvc0->idxbuf.buffer)
       nouveau_bufctx_reset(nvc0->bufctx_3d, NVC0_BIND_IDX);
-   if (ib && ib->buffer) {
-      nvc0->dirty |= NVC0_NEW_IDXBUF;
+   if (ib) {
       pipe_resource_reference(&nvc0->idxbuf.buffer, ib->buffer);
-      nvc0->idxbuf.offset = ib->offset;
       nvc0->idxbuf.index_size = ib->index_size;
+      if (ib->buffer) {
+         nvc0->idxbuf.offset = ib->offset;
+         nvc0->dirty |= NVC0_NEW_IDXBUF;
+      } else {
+         nvc0->idxbuf.user_buffer = ib->user_buffer;
+         nvc0->dirty &= ~NVC0_NEW_IDXBUF;
+      }
    } else {
       nvc0->dirty &= ~NVC0_NEW_IDXBUF;
       pipe_resource_reference(&nvc0->idxbuf.buffer, NULL);
......
......@@ -427,8 +427,7 @@ nvc0_idxbuf_validate(struct nvc0_context *nvc0)
    struct nv04_resource *buf = nv04_resource(nvc0->idxbuf.buffer);
    assert(buf);
-   if (!nouveau_resource_mapped_by_gpu(&buf->base))
-      return;
+   assert(nouveau_resource_mapped_by_gpu(&buf->base));
    PUSH_SPACE(push, 6);
    BEGIN_NVC0(push, NVC0_3D(INDEX_ARRAY_START_HIGH), 5);
......@@ -507,7 +506,7 @@ nvc0_draw_arrays(struct nvc0_context *nvc0,
 }
 static void
-nvc0_draw_elements_inline_u08(struct nouveau_pushbuf *push, uint8_t *map,
+nvc0_draw_elements_inline_u08(struct nouveau_pushbuf *push, const uint8_t *map,
                               unsigned start, unsigned count)
 {
    map += start;
......@@ -535,7 +534,7 @@ nvc0_draw_elements_inline_u08(struct nouveau_pushbuf *push, uint8_t *map,
 }
 static void
-nvc0_draw_elements_inline_u16(struct nouveau_pushbuf *push, uint16_t *map,
+nvc0_draw_elements_inline_u16(struct nouveau_pushbuf *push, const uint16_t *map,
                               unsigned start, unsigned count)
 {
    map += start;
......@@ -560,7 +559,7 @@ nvc0_draw_elements_inline_u16(struct nouveau_pushbuf *push, uint16_t *map,
 }
 static void
-nvc0_draw_elements_inline_u32(struct nouveau_pushbuf *push, uint32_t *map,
+nvc0_draw_elements_inline_u32(struct nouveau_pushbuf *push, const uint32_t *map,
                               unsigned start, unsigned count)
 {
    map += start;
......@@ -578,7 +577,8 @@ nvc0_draw_elements_inline_u32(struct nouveau_pushbuf *push, uint32_t *map,
 }
 static void
-nvc0_draw_elements_inline_u32_short(struct nouveau_pushbuf *push, uint32_t *map,
+nvc0_draw_elements_inline_u32_short(struct nouveau_pushbuf *push,
+                                    const uint32_t *map,
                                     unsigned start, unsigned count)
 {
    map += start;
......@@ -608,7 +608,6 @@ nvc0_draw_elements(struct nvc0_context *nvc0, boolean shorten,
                    unsigned instance_count, int32_t index_bias)
 {
    struct nouveau_pushbuf *push = nvc0->base.pushbuf;
-   void *data;
    unsigned prim;
    const unsigned index_size = nvc0->idxbuf.index_size;
......@@ -621,7 +620,7 @@ nvc0_draw_elements(struct nvc0_context *nvc0, boolean shorten,
       nvc0->state.index_bias = index_bias;
    }
-   if (nouveau_resource_mapped_by_gpu(nvc0->idxbuf.buffer)) {
+   if (nvc0->idxbuf.buffer) {
       PUSH_SPACE(push, 1);
       IMMED_NVC0(push, NVC0_3D(VERTEX_BEGIN_GL), prim);
       do {
......@@ -637,11 +636,7 @@ nvc0_draw_elements(struct nvc0_context *nvc0, boolean shorten,
       } while (instance_count);
       IMMED_NVC0(push, NVC0_3D(VERTEX_END_GL), 0);
    } else {
-      data = nouveau_resource_map_offset(&nvc0->base,
-                                         nv04_resource(nvc0->idxbuf.buffer),
-                                         nvc0->idxbuf.offset, NOUVEAU_BO_RD);
-      if (!data)
-         return;
+      const void *data = nvc0->idxbuf.user_buffer;
       while (instance_count--) {
          PUSH_SPACE(push, 2);
......@@ -768,8 +763,6 @@ nvc0_draw_vbo(struct pipe_context *pipe, const struct pipe_draw_info *info)
    if (info->indexed) {
       boolean shorten = info->max_index <= 65535;
-      assert(nvc0->idxbuf.buffer);
       if (info->primitive_restart != nvc0->state.prim_restart) {
          if (info->primitive_restart) {
             BEGIN_NVC0(push, NVC0_3D(PRIM_RESTART_ENABLE), 2);
......
......@@ -78,11 +78,13 @@ nvc0_vertex_configure_translate(struct nvc0_context *nvc0, int32_t index_bias)
 static INLINE void
 nvc0_push_map_idxbuf(struct push_context *ctx, struct nvc0_context *nvc0)
 {
-   struct nv04_resource *buf = nv04_resource(nvc0->idxbuf.buffer);
-   unsigned offset = nvc0->idxbuf.offset;
-   ctx->idxbuf = nouveau_resource_map_offset(&nvc0->base,
-                                             buf, offset, NOUVEAU_BO_RD);
+   if (nvc0->idxbuf.buffer) {
+      struct nv04_resource *buf = nv04_resource(nvc0->idxbuf.buffer);
+      ctx->idxbuf = nouveau_resource_map_offset(&nvc0->base,
+         buf, nvc0->idxbuf.offset, NOUVEAU_BO_RD);
+   } else {
+      ctx->idxbuf = nvc0->idxbuf.user_buffer;
+   }
 }
 static INLINE void
......
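With PIPE_CAP_USER_INDEX_BUFFERS now reported as 1, a state tracker may bind indices without wrapping them in a resource. Roughly, against the Gallium interface of this period (a hedged sketch for illustration, not code from this commit; the function and array names are made up):

#include <string.h>
#include "pipe/p_context.h"
#include "pipe/p_state.h"

/* Hedged sketch: bind a small CPU-side index array; the driver paths above
 * pick it up through idxbuf.user_buffer instead of mapping a buffer object. */
static const uint16_t tri_indices[3] = { 0, 1, 2 };

static void
bind_user_indices(struct pipe_context *pipe)
{
   struct pipe_index_buffer ib;

   memset(&ib, 0, sizeof(ib));
   ib.index_size  = 2;            /* 16-bit indices */
   ib.offset      = 0;
   ib.buffer      = NULL;         /* no GPU resource ...   */
   ib.user_buffer = tri_indices;  /* ... just a user pointer */

   pipe->set_index_buffer(pipe, &ib);
}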