Commit cd0dec0d authored by Samuel Pitoiset

nouveau: use bool instead of boolean


Signed-off-by: Samuel Pitoiset <samuel.pitoiset@gmail.com>
Acked-by: Ilia Mirkin <imirkin@alum.mit.edu>
parent 4be30fcd
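
For context: gallium's p_compiler.h historically defined an integer-backed "boolean" typedef (an unsigned char in most versions) together with TRUE/FALSE macros, and this commit moves the nouveau driver to the C99 bool type from <stdbool.h> instead. A minimal sketch of the substitution pattern follows; the buffer_ready_* helper names are hypothetical, for illustration only:

/* Before: gallium-style integer-backed boolean (as in p_compiler.h). */
typedef unsigned char boolean;
#define TRUE  1
#define FALSE 0

static boolean
buffer_ready_old(int fence_done)     /* hypothetical example */
{
   return fence_done ? TRUE : FALSE; /* must normalize to 0/1 by hand */
}

/* After: C99 bool -- conversion to bool maps any nonzero value to true,
 * so no TRUE/FALSE macros are needed. */
#include <stdbool.h>

static bool
buffer_ready_new(int fence_done)     /* hypothetical example */
{
   return fence_done != 0;
}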
@@ -151,10 +151,10 @@ struct nv50_ir_prog_info
       } gp;
       struct {
          unsigned numColourResults;
-         boolean writesDepth;
-         boolean earlyFragTests;
-         boolean separateFragData;
-         boolean usesDiscard;
+         bool writesDepth;
+         bool earlyFragTests;
+         bool separateFragData;
+         bool usesDiscard;
       } fp;
       struct {
          uint32_t inputOffset; /* base address for user args */
@@ -180,11 +180,11 @@ struct nv50_ir_prog_info
       int8_t viewportId;         /* output index of ViewportIndex */
       uint8_t fragDepth;         /* output index of FragDepth */
       uint8_t sampleMask;        /* output index of SampleMask */
-      boolean sampleInterp;      /* perform sample interp on all fp inputs */
+      bool sampleInterp;         /* perform sample interp on all fp inputs */
       uint8_t backFaceColor[2];  /* input/output indices of back face colour */
       uint8_t globalAccess;      /* 1 for read, 2 for wr, 3 for rw */
-      boolean fp64;              /* program uses fp64 math */
-      boolean nv50styleSurfaces; /* generate gX[] access for raw buffers */
+      bool fp64;                 /* program uses fp64 math */
+      bool nv50styleSurfaces;    /* generate gX[] access for raw buffers */
       uint8_t resInfoCBSlot;     /* cX[] used for tex handles, surface info */
       uint16_t texBindBase;      /* base address for tex handles (nve4) */
       uint16_t suInfoBase;       /* base address for surface info (nve4) */
@@ -826,7 +826,7 @@ Source::Source(struct nv50_ir_prog_info *prog) : info(prog)
    if (prog->dbgFlags & NV50_IR_DEBUG_BASIC)
       tgsi_dump(tokens, 0);
 
-   mainTempsInLMem = FALSE;
+   mainTempsInLMem = false;
 }
 
 Source::~Source()
@@ -937,7 +937,7 @@ void Source::scanProperty(const struct tgsi_full_property *prop)
       info->prop.gp.instanceCount = prop->u[0].Data;
       break;
    case TGSI_PROPERTY_FS_COLOR0_WRITES_ALL_CBUFS:
-      info->prop.fp.separateFragData = TRUE;
+      info->prop.fp.separateFragData = true;
       break;
    case TGSI_PROPERTY_FS_COORD_ORIGIN:
    case TGSI_PROPERTY_FS_COORD_PIXEL_CENTER:
@@ -1155,7 +1155,7 @@ bool Source::scanInstruction(const struct tgsi_full_instruction *inst)
    } else
    if (insn.getDst(0).getFile() == TGSI_FILE_TEMPORARY) {
       if (insn.getDst(0).isIndirect(0))
-         mainTempsInLMem = TRUE;
+         mainTempsInLMem = true;
    }
 }
@@ -1163,7 +1163,7 @@ bool Source::scanInstruction(const struct tgsi_full_instruction *inst)
       Instruction::SrcRegister src = insn.getSrc(s);
       if (src.getFile() == TGSI_FILE_TEMPORARY) {
          if (src.isIndirect(0))
-            mainTempsInLMem = TRUE;
+            mainTempsInLMem = true;
       } else
       if (src.getFile() == TGSI_FILE_RESOURCE) {
          if (src.getIndex(0) == TGSI_RESOURCE_GLOBAL)
@@ -176,7 +176,7 @@ GM107LoweringPass::handlePOPCNT(Instruction *i)
                      i->getSrc(0), i->getSrc(1));
    i->setSrc(0, tmp);
    i->setSrc(1, NULL);
-   return TRUE;
+   return true;
 }
 
 //
@@ -28,7 +28,7 @@ nouveau_transfer(struct pipe_transfer *transfer)
    return (struct nouveau_transfer *)transfer;
 }
 
-static INLINE boolean
+static INLINE bool
 nouveau_buffer_malloc(struct nv04_resource *buf)
 {
    if (!buf->data)
@@ -36,7 +36,7 @@ nouveau_buffer_malloc(struct nv04_resource *buf)
    return !!buf->data;
 }
 
-static INLINE boolean
+static INLINE bool
 nouveau_buffer_allocate(struct nouveau_screen *screen,
                         struct nv04_resource *buf, unsigned domain)
 {
@@ -53,12 +53,12 @@ nouveau_buffer_allocate(struct nouveau_screen *screen,
       buf->mm = nouveau_mm_allocate(screen->mm_GART, size,
                                     &buf->bo, &buf->offset);
       if (!buf->bo)
-         return FALSE;
+         return false;
       NOUVEAU_DRV_STAT(screen, buf_obj_current_bytes_sys, buf->base.width0);
    } else {
       assert(domain == 0);
       if (!nouveau_buffer_malloc(buf))
-         return FALSE;
+         return false;
    }
    buf->domain = domain;
    if (buf->bo)
@@ -66,7 +66,7 @@ nouveau_buffer_allocate(struct nouveau_screen *screen,
    util_range_set_empty(&buf->valid_buffer_range);
 
-   return TRUE;
+   return true;
 }
 
 static INLINE void
@@ -93,7 +93,7 @@ nouveau_buffer_release_gpu_storage(struct nv04_resource *buf)
    buf->domain = 0;
 }
 
-static INLINE boolean
+static INLINE bool
 nouveau_buffer_reallocate(struct nouveau_screen *screen,
                           struct nv04_resource *buf, unsigned domain)
 {
@@ -134,13 +134,13 @@ nouveau_buffer_destroy(struct pipe_screen *pscreen,
  */
 static uint8_t *
 nouveau_transfer_staging(struct nouveau_context *nv,
-                         struct nouveau_transfer *tx, boolean permit_pb)
+                         struct nouveau_transfer *tx, bool permit_pb)
 {
    const unsigned adj = tx->base.box.x & NOUVEAU_MIN_BUFFER_MAP_ALIGN_MASK;
    const unsigned size = align(tx->base.box.width, 4) + adj;
 
    if (!nv->push_data)
-      permit_pb = FALSE;
+      permit_pb = false;
 
    if ((size <= NOUVEAU_TRANSFER_PUSHBUF_THRESHOLD) && permit_pb) {
       tx->map = align_malloc(size, NOUVEAU_MIN_BUFFER_MAP_ALIGN);
@@ -162,7 +162,7 @@ nouveau_transfer_staging(struct nouveau_context *nv,
  * buffer. Also updates buf->data if present.
  *
  * Maybe just migrate to GART right away if we actually need to do this. */
-static boolean
+static bool
 nouveau_transfer_read(struct nouveau_context *nv, struct nouveau_transfer *tx)
 {
    struct nv04_resource *buf = nv04_resource(tx->base.resource);
@@ -175,12 +175,12 @@ nouveau_transfer_read(struct nouveau_context *nv, struct nouveau_transfer *tx)
                  buf->bo, buf->offset + base, buf->domain, size);
 
    if (nouveau_bo_wait(tx->bo, NOUVEAU_BO_RD, nv->client))
-      return FALSE;
+      return false;
 
    if (buf->data)
       memcpy(buf->data + base, tx->map, size);
 
-   return TRUE;
+   return true;
 }
 
 static void
@@ -190,7 +190,7 @@ nouveau_transfer_write(struct nouveau_context *nv, struct nouveau_transfer *tx,
    struct nv04_resource *buf = nv04_resource(tx->base.resource);
    uint8_t *data = tx->map + offset;
    const unsigned base = tx->base.box.x + offset;
-   const boolean can_cb = !((base | size) & 3);
+   const bool can_cb = !((base | size) & 3);
 
    if (buf->data)
       memcpy(data, buf->data + base, size);
@@ -219,32 +219,32 @@ nouveau_transfer_write(struct nouveau_context *nv, struct nouveau_transfer *tx,
 /* Does a CPU wait for the buffer's backing data to become reliably accessible
  * for write/read by waiting on the buffer's relevant fences.
  */
-static INLINE boolean
+static INLINE bool
 nouveau_buffer_sync(struct nv04_resource *buf, unsigned rw)
 {
    if (rw == PIPE_TRANSFER_READ) {
       if (!buf->fence_wr)
-         return TRUE;
+         return true;
       NOUVEAU_DRV_STAT_RES(buf, buf_non_kernel_fence_sync_count,
                            !nouveau_fence_signalled(buf->fence_wr));
       if (!nouveau_fence_wait(buf->fence_wr))
-         return FALSE;
+         return false;
    } else {
       if (!buf->fence)
-         return TRUE;
+         return true;
       NOUVEAU_DRV_STAT_RES(buf, buf_non_kernel_fence_sync_count,
                            !nouveau_fence_signalled(buf->fence));
       if (!nouveau_fence_wait(buf->fence))
-         return FALSE;
+         return false;
       nouveau_fence_ref(NULL, &buf->fence);
    }
    nouveau_fence_ref(NULL, &buf->fence_wr);
 
-   return TRUE;
+   return true;
 }
 
-static INLINE boolean
+static INLINE bool
 nouveau_buffer_busy(struct nv04_resource *buf, unsigned rw)
 {
    if (rw == PIPE_TRANSFER_READ)
@@ -292,11 +292,11 @@ nouveau_buffer_transfer_del(struct nouveau_context *nv,
 }
 
 /* Creates a cache in system memory of the buffer data. */
-static boolean
+static bool
 nouveau_buffer_cache(struct nouveau_context *nv, struct nv04_resource *buf)
 {
    struct nouveau_transfer tx;
-   boolean ret;
+   bool ret;
 
    tx.base.resource = &buf->base;
    tx.base.box.x = 0;
    tx.base.box.width = buf->base.width0;
@@ -305,13 +305,13 @@ nouveau_buffer_cache(struct nouveau_context *nv, struct nv04_resource *buf)
    if (!buf->data)
       if (!nouveau_buffer_malloc(buf))
-         return FALSE;
+         return false;
    if (!(buf->status & NOUVEAU_BUFFER_STATUS_DIRTY))
-      return TRUE;
+      return true;
 
    nv->stats.buf_cache_count++;
 
-   if (!nouveau_transfer_staging(nv, &tx, FALSE))
-      return FALSE;
+   if (!nouveau_transfer_staging(nv, &tx, false))
+      return false;
 
    ret = nouveau_transfer_read(nv, &tx);
    if (ret) {
@@ -330,15 +330,15 @@ nouveau_buffer_cache(struct nouveau_context *nv, struct nv04_resource *buf)
  * resource. This can be useful if we would otherwise have to wait for a read
  * operation to complete on this data.
  */
-static INLINE boolean
+static INLINE bool
 nouveau_buffer_should_discard(struct nv04_resource *buf, unsigned usage)
 {
    if (!(usage & PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE))
-      return FALSE;
+      return false;
    if (unlikely(buf->base.bind & PIPE_BIND_SHARED))
-      return FALSE;
+      return false;
    if (unlikely(usage & PIPE_TRANSFER_PERSISTENT))
-      return FALSE;
+      return false;
    return buf->mm && nouveau_buffer_busy(buf, PIPE_TRANSFER_WRITE);
 }
@@ -408,7 +408,7 @@ nouveau_buffer_transfer_map(struct pipe_context *pipe,
        * back into VRAM on unmap. */
       if (usage & PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE)
          buf->status &= NOUVEAU_BUFFER_STATUS_REALLOC_MASK;
-      nouveau_transfer_staging(nv, tx, TRUE);
+      nouveau_transfer_staging(nv, tx, true);
    } else {
      if (buf->status & NOUVEAU_BUFFER_STATUS_GPU_WRITING) {
         /* The GPU is currently writing to this buffer. Copy its current
@@ -419,13 +419,13 @@ nouveau_buffer_transfer_map(struct pipe_context *pipe,
             align_free(buf->data);
             buf->data = NULL;
          }
-         nouveau_transfer_staging(nv, tx, FALSE);
+         nouveau_transfer_staging(nv, tx, false);
          nouveau_transfer_read(nv, tx);
       } else {
          /* The buffer is currently idle. Create a staging area for writes,
          * and make sure that the cached data is up-to-date. */
          if (usage & PIPE_TRANSFER_WRITE)
-            nouveau_transfer_staging(nv, tx, TRUE);
+            nouveau_transfer_staging(nv, tx, true);
          if (!buf->data)
            nouveau_buffer_cache(nv, buf);
       }
@@ -477,7 +477,7 @@ nouveau_buffer_transfer_map(struct pipe_context *pipe,
    if (usage & PIPE_TRANSFER_DISCARD_RANGE) {
       /* The whole range is being discarded, so it doesn't matter what was
        * there before. No need to copy anything over. */
-      nouveau_transfer_staging(nv, tx, TRUE);
+      nouveau_transfer_staging(nv, tx, true);
       map = tx->map;
    } else
    if (nouveau_buffer_busy(buf, PIPE_TRANSFER_READ)) {
@@ -488,7 +488,7 @@ nouveau_buffer_transfer_map(struct pipe_context *pipe,
    } else {
       /* It is expected that the returned buffer be a representation of the
        * data in question, so we must copy it over from the buffer. */
-      nouveau_transfer_staging(nv, tx, TRUE);
+      nouveau_transfer_staging(nv, tx, true);
       if (tx->map)
         memcpy(tx->map, map, box->width);
       map = tx->map;
@@ -539,7 +539,7 @@ nouveau_buffer_transfer_unmap(struct pipe_context *pipe,
          const uint8_t bind = buf->base.bind;
          /* make sure we invalidate dedicated caches */
          if (bind & (PIPE_BIND_VERTEX_BUFFER | PIPE_BIND_INDEX_BUFFER))
-            nv->vbo_dirty = TRUE;
+            nv->vbo_dirty = true;
       }
 
       util_range_add(&buf->valid_buffer_range,
@@ -634,7 +634,7 @@ nouveau_buffer_create(struct pipe_screen *pscreen,
 {
    struct nouveau_screen *screen = nouveau_screen(pscreen);
    struct nv04_resource *buffer;
-   boolean ret;
+   bool ret;
 
    buffer = CALLOC_STRUCT(nv04_resource);
    if (!buffer)
@@ -678,7 +678,7 @@ nouveau_buffer_create(struct pipe_screen *pscreen,
    }
 
    ret = nouveau_buffer_allocate(screen, buffer, buffer->domain);
-   if (ret == FALSE)
+   if (ret == false)
       goto fail;
 
    if (buffer->domain == NOUVEAU_BO_VRAM && screen->hint_buf_keep_sysmem_copy)
@@ -725,20 +725,20 @@ nouveau_user_buffer_create(struct pipe_screen *pscreen, void *ptr,
    return &buffer->base;
 }
 
-static INLINE boolean
+static INLINE bool
 nouveau_buffer_data_fetch(struct nouveau_context *nv, struct nv04_resource *buf,
                           struct nouveau_bo *bo, unsigned offset, unsigned size)
 {
    if (!nouveau_buffer_malloc(buf))
-      return FALSE;
+      return false;
    if (nouveau_bo_map(bo, NOUVEAU_BO_RD, nv->client))
-      return FALSE;
+      return false;
    memcpy(buf->data, (uint8_t *)bo->map + offset, size);
 
-   return TRUE;
+   return true;
 }
 
 /* Migrate a linear buffer (vertex, index, constants) USER -> GART -> VRAM. */
-boolean
+bool
 nouveau_buffer_migrate(struct nouveau_context *nv,
                        struct nv04_resource *buf, const unsigned new_domain)
 {
@@ -753,7 +753,7 @@ nouveau_buffer_migrate(struct nouveau_context *nv,
    if (new_domain == NOUVEAU_BO_GART && old_domain == 0) {
       if (!nouveau_buffer_allocate(screen, buf, new_domain))
-         return FALSE;
+         return false;
 
       ret = nouveau_bo_map(buf->bo, 0, nv->client);
       if (ret)
         return ret;
@@ -766,7 +766,7 @@ nouveau_buffer_migrate(struct nouveau_context *nv,
       if (new_domain == NOUVEAU_BO_VRAM) {
          /* keep a system memory copy of our data in case we hit a fallback */
          if (!nouveau_buffer_data_fetch(nv, buf, buf->bo, buf->offset, size))
-            return FALSE;
+            return false;
          if (nouveau_mesa_debug)
            debug_printf("migrating %u KiB to VRAM\n", size / 1024);
       }
@@ -787,28 +787,28 @@ nouveau_buffer_migrate(struct nouveau_context *nv,
    if (new_domain == NOUVEAU_BO_VRAM && old_domain == 0) {
       struct nouveau_transfer tx;
       if (!nouveau_buffer_allocate(screen, buf, NOUVEAU_BO_VRAM))
-         return FALSE;
+         return false;
       tx.base.resource = &buf->base;
       tx.base.box.x = 0;
       tx.base.box.width = buf->base.width0;
       tx.bo = NULL;
       tx.map = NULL;
-      if (!nouveau_transfer_staging(nv, &tx, FALSE))
-         return FALSE;
+      if (!nouveau_transfer_staging(nv, &tx, false))
+         return false;
       nouveau_transfer_write(nv, &tx, 0, tx.base.box.width);
       nouveau_buffer_transfer_del(nv, &tx);
    } else
-      return FALSE;
+      return false;
 
    assert(buf->domain == new_domain);
-   return TRUE;
+   return true;
 }
 
 /* Migrate data from glVertexAttribPointer(non-VBO) user buffers to GART.
  * We'd like to only allocate @size bytes here, but then we'd have to rebase
  * the vertex indices ...
  */
-boolean
+bool
 nouveau_user_buffer_upload(struct nouveau_context *nv,
                            struct nv04_resource *buf,
                            unsigned base, unsigned size)
@@ -820,14 +820,14 @@ nouveau_user_buffer_upload(struct nouveau_context *nv,
    buf->base.width0 = base + size;
    if (!nouveau_buffer_reallocate(screen, buf, NOUVEAU_BO_GART))
-      return FALSE;
+      return false;
 
    ret = nouveau_bo_map(buf->bo, 0, nv->client);
    if (ret)
-      return FALSE;
+      return false;
    memcpy((uint8_t *)buf->bo->map + buf->offset + base, buf->data + base, size);
 
-   return TRUE;
+   return true;
 }
@@ -870,7 +870,7 @@ nouveau_scratch_runout_release(struct nouveau_context *nv)
 /* Allocate an extra bo if we can't fit everything we need simultaneously.
  * (Could happen for very large user arrays.)
  */
-static INLINE boolean
+static INLINE bool
 nouveau_scratch_runout(struct nouveau_context *nv, unsigned size)
 {
    int ret;
@@ -904,7 +904,7 @@ nouveau_scratch_runout(struct nouveau_context *nv, unsigned size)
 /* Continue to next scratch buffer, if available (no wrapping, large enough).
  * Allocate it if it has not yet been created.
  */
-static INLINE boolean
+static INLINE bool
 nouveau_scratch_next(struct nouveau_context *nv, unsigned size)
 {
    struct nouveau_bo *bo;
@@ -912,14 +912,14 @@ nouveau_scratch_next(struct nouveau_context *nv, unsigned size)
    const unsigned i = (nv->scratch.id + 1) % NOUVEAU_MAX_SCRATCH_BUFS;
 
    if ((size > nv->scratch.bo_size) || (i == nv->scratch.wrap))
-      return FALSE;
+      return false;
    nv->scratch.id = i;
 
    bo = nv->scratch.bo[i];
    if (!bo) {
       ret = nouveau_scratch_bo_alloc(nv, &bo, nv->scratch.bo_size);
       if (ret)
-         return FALSE;
+         return false;
       nv->scratch.bo[i] = bo;
    }
    nv->scratch.current = bo;
@@ -932,10 +932,10 @@ nouveau_scratch_next(struct nouveau_context *nv, unsigned size)
    return !ret;
 }
 
-static boolean
+static bool
 nouveau_scratch_more(struct nouveau_context *nv, unsigned min_size)
 {
-   boolean ret;
+   bool ret;
 
    ret = nouveau_scratch_next(nv, min_size);
    if (!ret)
@@ -58,7 +58,7 @@ nouveau_copy_buffer(struct nouveau_context *,
                     struct nv04_resource *dst, unsigned dst_pos,
                     struct nv04_resource *src, unsigned src_pos, unsigned size);
 
-boolean
+bool
 nouveau_buffer_migrate(struct nouveau_context *,
                        struct nv04_resource *, unsigned domain);
@@ -79,7 +79,7 @@ nv04_resource(struct pipe_resource *resource)
 }
 
 /* is resource mapped into the GPU's address space (i.e. VRAM or GART) ? */
-static INLINE boolean
+static INLINE bool
 nouveau_resource_mapped_by_gpu(struct pipe_resource *resource)
 {
    return nv04_resource(resource)->domain != 0;
@@ -93,7 +93,7 @@ struct pipe_resource *
 nouveau_user_buffer_create(struct pipe_screen *screen, void *ptr,
                            unsigned bytes, unsigned usage);
 
-boolean
+bool
 nouveau_user_buffer_upload(struct nouveau_context *, struct nv04_resource *,
                            unsigned base, unsigned size);
@@ -13,7 +13,7 @@ struct nouveau_context {
    struct nouveau_client *client;
    struct nouveau_pushbuf *pushbuf;
 
-   boolean vbo_dirty;
+   bool vbo_dirty;
 
    void (*copy_data)(struct nouveau_context *,
                      struct nouveau_bo *dst, unsigned, unsigned,
@@ -104,7 +104,7 @@ nouveau_context_update_frame_stats(struct nouveau_context *nv)
       nv->stats.buf_cache_count = 0;
       nv->stats.buf_cache_frame |= 1;
       if ((nv->stats.buf_cache_frame & 0xf) == 0xf)
-         nv->screen->hint_buf_keep_sysmem_copy = TRUE;
+         nv->screen->hint_buf_keep_sysmem_copy = true;
    }
 }
@@ -28,13 +28,13 @@
 #include <sched.h>
 #endif
 
-boolean
+bool
 nouveau_fence_new(struct nouveau_screen *screen, struct nouveau_fence **fence,
-                  boolean emit)
+                  bool emit)
 {
    *fence = CALLOC_STRUCT(nouveau_fence);
    if (!*fence)
-      return FALSE;
+      return false;
 
    (*fence)->screen = screen;
    (*fence)->ref = 1;
@@ -43,7 +43,7 @@ nouveau_fence_new(struct nouveau_screen *screen, struct nouveau_fence **fence,
    if (emit)
       nouveau_fence_emit(*fence);
 
-   return TRUE;
+   return true;
 }
 
 static void
@@ -58,7 +58,7 @@ nouveau_fence_trigger_work(struct nouveau_fence *fence)
    }
 }
 
-boolean
+bool
 nouveau_fence_work(struct nouveau_fence *fence,
                    void (*func)(void *), void *data)
 {
@@ -66,16 +66,16 @@ nouveau_fence_work(struct nouveau_fence *fence,
    if (!fence || fence->state == NOUVEAU_FENCE_STATE_SIGNALLED) {
       func(data);
-      return TRUE;
+      return true;
    }
 
    work = CALLOC_STRUCT(nouveau_fence_work);
    if (!work)
-      return FALSE;
+      return false;
 
    work->func = func;
    work->data = data;
 
    LIST_ADD(&work->list, &fence->work);
-   return TRUE;
+   return true;
 }
 
 void
@@ -132,7 +132,7 @@ nouveau_fence_del(struct nouveau_fence *fence)
 }
 
 void
-nouveau_fence_update(struct nouveau_screen *screen, boolean flushed)
+nouveau_fence_update(struct nouveau_screen *screen, bool flushed)
 {
    struct nouveau_fence *fence;
    struct nouveau_fence *next = NULL;
@@ -167,21 +167,21 @@ nouveau_fence_update(struct nouveau_screen *screen, bool flushed)
 #define NOUVEAU_FENCE_MAX_SPINS (1 << 31)
 
-boolean
+bool
 nouveau_fence_signalled(struct nouveau_fence *fence)
 {
    struct nouveau_screen *screen = fence->screen;
 
    if (fence->state == NOUVEAU_FENCE_STATE_SIGNALLED)
-      return TRUE;
+      return true;
 
    if (fence->state >= NOUVEAU_FENCE_STATE_EMITTED)
-      nouveau_fence_update(screen, FALSE);
+      nouveau_fence_update(screen, false);
 
    return fence->state == NOUVEAU_FENCE_STATE_SIGNALLED;
 }
 
-boolean
+bool
 nouveau_fence_wait(struct nouveau_fence *fence)
 {
    struct nouveau_screen *screen = fence->screen;
@@ -195,16 +195,16 @@ nouveau_fence_wait(struct nouveau_fence *fence)
    if (fence->state < NOUVEAU_FENCE_STATE_FLUSHED)
       if (nouveau_pushbuf_kick(screen->pushbuf, screen->pushbuf->channel))
-         return FALSE;
+         return false;
 
    if (fence == screen->fence.current